codegen_util.cc revision e45fb9e7976c8462b94a58ad60b006b0eacec49f
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "dex/compiler_internals.h" 18#include "dex_file-inl.h" 19#include "gc_map.h" 20#include "gc_map_builder.h" 21#include "mapping_table.h" 22#include "mir_to_lir-inl.h" 23#include "dex/quick/dex_file_method_inliner.h" 24#include "dex/quick/dex_file_to_method_inliner_map.h" 25#include "dex/verification_results.h" 26#include "dex/verified_method.h" 27#include "verifier/dex_gc_map.h" 28#include "verifier/method_verifier.h" 29#include "vmap_table.h" 30 31namespace art { 32 33namespace { 34 35/* Dump a mapping table */ 36template <typename It> 37void DumpMappingTable(const char* table_name, const char* descriptor, const char* name, 38 const Signature& signature, uint32_t size, It first) { 39 if (size != 0) { 40 std::string line(StringPrintf("\n %s %s%s_%s_table[%u] = {", table_name, 41 descriptor, name, signature.ToString().c_str(), size)); 42 std::replace(line.begin(), line.end(), ';', '_'); 43 LOG(INFO) << line; 44 for (uint32_t i = 0; i != size; ++i) { 45 line = StringPrintf(" {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc()); 46 ++first; 47 LOG(INFO) << line; 48 } 49 LOG(INFO) <<" };\n\n"; 50 } 51} 52 53} // anonymous namespace 54 55bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) { 56 bool res = false; 57 if (rl_src.is_const) { 58 if (rl_src.wide) { 59 if (rl_src.fp) { 60 res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src)); 61 } else { 62 res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src)); 63 } 64 } else { 65 if (rl_src.fp) { 66 res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src)); 67 } else { 68 res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src)); 69 } 70 } 71 } 72 return res; 73} 74 75void Mir2Lir::MarkSafepointPC(LIR* inst) { 76 DCHECK(!inst->flags.use_def_invalid); 77 inst->u.m.def_mask = ENCODE_ALL; 78 LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC); 79 DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL); 80} 81 82/* Remove a LIR from the list. */ 83void Mir2Lir::UnlinkLIR(LIR* lir) { 84 if (UNLIKELY(lir == first_lir_insn_)) { 85 first_lir_insn_ = lir->next; 86 if (lir->next != NULL) { 87 lir->next->prev = NULL; 88 } else { 89 DCHECK(lir->next == NULL); 90 DCHECK(lir == last_lir_insn_); 91 last_lir_insn_ = NULL; 92 } 93 } else if (lir == last_lir_insn_) { 94 last_lir_insn_ = lir->prev; 95 lir->prev->next = NULL; 96 } else if ((lir->prev != NULL) && (lir->next != NULL)) { 97 lir->prev->next = lir->next; 98 lir->next->prev = lir->prev; 99 } 100} 101 102/* Convert an instruction to a NOP */ 103void Mir2Lir::NopLIR(LIR* lir) { 104 lir->flags.is_nop = true; 105 if (!cu_->verbose) { 106 UnlinkLIR(lir); 107 } 108} 109 110void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) { 111 uint64_t *mask_ptr; 112 uint64_t mask = ENCODE_MEM; 113 DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE)); 114 DCHECK(!lir->flags.use_def_invalid); 115 if (is_load) { 116 mask_ptr = &lir->u.m.use_mask; 117 } else { 118 mask_ptr = &lir->u.m.def_mask; 119 } 120 /* Clear out the memref flags */ 121 *mask_ptr &= ~mask; 122 /* ..and then add back the one we need */ 123 switch (mem_type) { 124 case kLiteral: 125 DCHECK(is_load); 126 *mask_ptr |= ENCODE_LITERAL; 127 break; 128 case kDalvikReg: 129 *mask_ptr |= ENCODE_DALVIK_REG; 130 break; 131 case kHeapRef: 132 *mask_ptr |= ENCODE_HEAP_REF; 133 break; 134 case kMustNotAlias: 135 /* Currently only loads can be marked as kMustNotAlias */ 136 DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE)); 137 *mask_ptr |= ENCODE_MUST_NOT_ALIAS; 138 break; 139 default: 140 LOG(FATAL) << "Oat: invalid memref kind - " << mem_type; 141 } 142} 143 144/* 145 * Mark load/store instructions that access Dalvik registers through the stack. 146 */ 147void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, 148 bool is64bit) { 149 SetMemRefType(lir, is_load, kDalvikReg); 150 151 /* 152 * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit 153 * access. 154 */ 155 lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit); 156} 157 158/* 159 * Debugging macros 160 */ 161#define DUMP_RESOURCE_MASK(X) 162 163/* Pretty-print a LIR instruction */ 164void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { 165 int offset = lir->offset; 166 int dest = lir->operands[0]; 167 const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops)); 168 169 /* Handle pseudo-ops individually, and all regular insns as a group */ 170 switch (lir->opcode) { 171 case kPseudoMethodEntry: 172 LOG(INFO) << "-------- method entry " 173 << PrettyMethod(cu_->method_idx, *cu_->dex_file); 174 break; 175 case kPseudoMethodExit: 176 LOG(INFO) << "-------- Method_Exit"; 177 break; 178 case kPseudoBarrier: 179 LOG(INFO) << "-------- BARRIER"; 180 break; 181 case kPseudoEntryBlock: 182 LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest; 183 break; 184 case kPseudoDalvikByteCodeBoundary: 185 if (lir->operands[0] == 0) { 186 // NOTE: only used for debug listings. 187 lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string")); 188 } 189 LOG(INFO) << "-------- dalvik offset: 0x" << std::hex 190 << lir->dalvik_offset << " @ " 191 << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0])); 192 break; 193 case kPseudoExitBlock: 194 LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest; 195 break; 196 case kPseudoPseudoAlign4: 197 LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex 198 << offset << "): .align4"; 199 break; 200 case kPseudoEHBlockLabel: 201 LOG(INFO) << "Exception_Handling:"; 202 break; 203 case kPseudoTargetLabel: 204 case kPseudoNormalBlockLabel: 205 LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":"; 206 break; 207 case kPseudoThrowTarget: 208 LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":"; 209 break; 210 case kPseudoIntrinsicRetry: 211 LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":"; 212 break; 213 case kPseudoSuspendTarget: 214 LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":"; 215 break; 216 case kPseudoSafepointPC: 217 LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":"; 218 break; 219 case kPseudoExportedPC: 220 LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":"; 221 break; 222 case kPseudoCaseLabel: 223 LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x" 224 << std::hex << lir->operands[0] << "|" << std::dec << 225 lir->operands[0]; 226 break; 227 default: 228 if (lir->flags.is_nop && !dump_nop) { 229 break; 230 } else { 231 std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode), 232 lir, base_addr)); 233 std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode), 234 lir, base_addr)); 235 LOG(INFO) << StringPrintf("%5p: %-9s%s%s", 236 base_addr + offset, 237 op_name.c_str(), op_operands.c_str(), 238 lir->flags.is_nop ? "(nop)" : ""); 239 } 240 break; 241 } 242 243 if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) { 244 DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.use_mask, "use")); 245 } 246 if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) { 247 DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.def_mask, "def")); 248 } 249} 250 251void Mir2Lir::DumpPromotionMap() { 252 int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps(); 253 for (int i = 0; i < num_regs; i++) { 254 PromotionMap v_reg_map = promotion_map_[i]; 255 std::string buf; 256 if (v_reg_map.fp_location == kLocPhysReg) { 257 StringAppendF(&buf, " : s%d", RegStorage::RegNum(v_reg_map.FpReg)); 258 } 259 260 std::string buf3; 261 if (i < cu_->num_dalvik_registers) { 262 StringAppendF(&buf3, "%02d", i); 263 } else if (i == mir_graph_->GetMethodSReg()) { 264 buf3 = "Method*"; 265 } else { 266 StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers); 267 } 268 269 LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(), 270 v_reg_map.core_location == kLocPhysReg ? 271 "r" : "SP+", v_reg_map.core_location == kLocPhysReg ? 272 v_reg_map.core_reg : SRegOffset(i), 273 buf.c_str()); 274 } 275} 276 277void Mir2Lir::UpdateLIROffsets() { 278 // Only used for code listings. 279 size_t offset = 0; 280 for (LIR* lir = first_lir_insn_; lir != nullptr; lir = lir->next) { 281 lir->offset = offset; 282 if (!lir->flags.is_nop && !IsPseudoLirOp(lir->opcode)) { 283 offset += GetInsnSize(lir); 284 } else if (lir->opcode == kPseudoPseudoAlign4) { 285 offset += (offset & 0x2); 286 } 287 } 288} 289 290/* Dump instructions and constant pool contents */ 291void Mir2Lir::CodegenDump() { 292 LOG(INFO) << "Dumping LIR insns for " 293 << PrettyMethod(cu_->method_idx, *cu_->dex_file); 294 LIR* lir_insn; 295 int insns_size = cu_->code_item->insns_size_in_code_units_; 296 297 LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs; 298 LOG(INFO) << "Ins : " << cu_->num_ins; 299 LOG(INFO) << "Outs : " << cu_->num_outs; 300 LOG(INFO) << "CoreSpills : " << num_core_spills_; 301 LOG(INFO) << "FPSpills : " << num_fp_spills_; 302 LOG(INFO) << "CompilerTemps : " << mir_graph_->GetNumUsedCompilerTemps(); 303 LOG(INFO) << "Frame size : " << frame_size_; 304 LOG(INFO) << "code size is " << total_size_ << 305 " bytes, Dalvik size is " << insns_size * 2; 306 LOG(INFO) << "expansion factor: " 307 << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2); 308 DumpPromotionMap(); 309 UpdateLIROffsets(); 310 for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) { 311 DumpLIRInsn(lir_insn, 0); 312 } 313 for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) { 314 LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset, 315 lir_insn->operands[0]); 316 } 317 318 const DexFile::MethodId& method_id = 319 cu_->dex_file->GetMethodId(cu_->method_idx); 320 const Signature signature = cu_->dex_file->GetMethodSignature(method_id); 321 const char* name = cu_->dex_file->GetMethodName(method_id); 322 const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id)); 323 324 // Dump mapping tables 325 if (!encoded_mapping_table_.empty()) { 326 MappingTable table(&encoded_mapping_table_[0]); 327 DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, 328 table.PcToDexSize(), table.PcToDexBegin()); 329 DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, 330 table.DexToPcSize(), table.DexToPcBegin()); 331 } 332} 333 334/* 335 * Search the existing constants in the literal pool for an exact or close match 336 * within specified delta (greater or equal to 0). 337 */ 338LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) { 339 while (data_target) { 340 if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta) 341 return data_target; 342 data_target = data_target->next; 343 } 344 return NULL; 345} 346 347/* Search the existing constants in the literal pool for an exact wide match */ 348LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) { 349 bool lo_match = false; 350 LIR* lo_target = NULL; 351 while (data_target) { 352 if (lo_match && (data_target->operands[0] == val_hi)) { 353 // Record high word in case we need to expand this later. 354 lo_target->operands[1] = val_hi; 355 return lo_target; 356 } 357 lo_match = false; 358 if (data_target->operands[0] == val_lo) { 359 lo_match = true; 360 lo_target = data_target; 361 } 362 data_target = data_target->next; 363 } 364 return NULL; 365} 366 367/* 368 * The following are building blocks to insert constants into the pool or 369 * instruction streams. 370 */ 371 372/* Add a 32-bit constant to the constant pool */ 373LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) { 374 /* Add the constant to the literal pool */ 375 if (constant_list_p) { 376 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData)); 377 new_value->operands[0] = value; 378 new_value->next = *constant_list_p; 379 *constant_list_p = new_value; 380 estimated_native_code_size_ += sizeof(value); 381 return new_value; 382 } 383 return NULL; 384} 385 386/* Add a 64-bit constant to the constant pool or mixed with code */ 387LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) { 388 AddWordData(constant_list_p, val_hi); 389 return AddWordData(constant_list_p, val_lo); 390} 391 392static void Push32(std::vector<uint8_t>&buf, int data) { 393 buf.push_back(data & 0xff); 394 buf.push_back((data >> 8) & 0xff); 395 buf.push_back((data >> 16) & 0xff); 396 buf.push_back((data >> 24) & 0xff); 397} 398 399// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems. 400static void PushPointer(std::vector<uint8_t>&buf, const void* pointer, bool target64) { 401 uint64_t data = reinterpret_cast<uintptr_t>(pointer); 402 if (target64) { 403 Push32(buf, data & 0xFFFFFFFF); 404 Push32(buf, (data >> 32) & 0xFFFFFFFF); 405 } else { 406 Push32(buf, static_cast<uint32_t>(data)); 407 } 408} 409 410static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) { 411 while (buf.size() < offset) { 412 buf.push_back(0); 413 } 414} 415 416/* Write the literal pool to the output stream */ 417void Mir2Lir::InstallLiteralPools() { 418 AlignBuffer(code_buffer_, data_offset_); 419 LIR* data_lir = literal_list_; 420 while (data_lir != NULL) { 421 Push32(code_buffer_, data_lir->operands[0]); 422 data_lir = NEXT_LIR(data_lir); 423 } 424 // Push code and method literals, record offsets for the compiler to patch. 425 data_lir = code_literal_list_; 426 while (data_lir != NULL) { 427 uint32_t target_method_idx = data_lir->operands[0]; 428 const DexFile* target_dex_file = 429 reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1])); 430 cu_->compiler_driver->AddCodePatch(cu_->dex_file, 431 cu_->class_def_idx, 432 cu_->method_idx, 433 cu_->invoke_type, 434 target_method_idx, 435 target_dex_file, 436 static_cast<InvokeType>(data_lir->operands[2]), 437 code_buffer_.size()); 438 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 439 // unique value based on target to ensure code deduplication works 440 PushPointer(code_buffer_, &target_method_id, cu_->target64); 441 data_lir = NEXT_LIR(data_lir); 442 } 443 data_lir = method_literal_list_; 444 while (data_lir != NULL) { 445 uint32_t target_method_idx = data_lir->operands[0]; 446 const DexFile* target_dex_file = 447 reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1])); 448 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, 449 cu_->class_def_idx, 450 cu_->method_idx, 451 cu_->invoke_type, 452 target_method_idx, 453 target_dex_file, 454 static_cast<InvokeType>(data_lir->operands[2]), 455 code_buffer_.size()); 456 const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx); 457 // unique value based on target to ensure code deduplication works 458 PushPointer(code_buffer_, &target_method_id, cu_->target64); 459 data_lir = NEXT_LIR(data_lir); 460 } 461 // Push class literals. 462 data_lir = class_literal_list_; 463 while (data_lir != NULL) { 464 uint32_t target_method_idx = data_lir->operands[0]; 465 cu_->compiler_driver->AddClassPatch(cu_->dex_file, 466 cu_->class_def_idx, 467 cu_->method_idx, 468 target_method_idx, 469 code_buffer_.size()); 470 const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx); 471 // unique value based on target to ensure code deduplication works 472 PushPointer(code_buffer_, &target_method_id, cu_->target64); 473 data_lir = NEXT_LIR(data_lir); 474 } 475} 476 477/* Write the switch tables to the output stream */ 478void Mir2Lir::InstallSwitchTables() { 479 GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); 480 while (true) { 481 Mir2Lir::SwitchTable* tab_rec = iterator.Next(); 482 if (tab_rec == NULL) break; 483 AlignBuffer(code_buffer_, tab_rec->offset); 484 /* 485 * For Arm, our reference point is the address of the bx 486 * instruction that does the launch, so we have to subtract 487 * the auto pc-advance. For other targets the reference point 488 * is a label, so we can use the offset as-is. 489 */ 490 int bx_offset = INVALID_OFFSET; 491 switch (cu_->instruction_set) { 492 case kThumb2: 493 DCHECK(tab_rec->anchor->flags.fixup != kFixupNone); 494 bx_offset = tab_rec->anchor->offset + 4; 495 break; 496 case kX86: 497 case kX86_64: 498 bx_offset = 0; 499 break; 500 case kArm64: 501 case kMips: 502 bx_offset = tab_rec->anchor->offset; 503 break; 504 default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set; 505 } 506 if (cu_->verbose) { 507 LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset; 508 } 509 if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 510 const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2])); 511 for (int elems = 0; elems < tab_rec->table[1]; elems++) { 512 int disp = tab_rec->targets[elems]->offset - bx_offset; 513 if (cu_->verbose) { 514 LOG(INFO) << " Case[" << elems << "] key: 0x" 515 << std::hex << keys[elems] << ", disp: 0x" 516 << std::hex << disp; 517 } 518 Push32(code_buffer_, keys[elems]); 519 Push32(code_buffer_, 520 tab_rec->targets[elems]->offset - bx_offset); 521 } 522 } else { 523 DCHECK_EQ(static_cast<int>(tab_rec->table[0]), 524 static_cast<int>(Instruction::kPackedSwitchSignature)); 525 for (int elems = 0; elems < tab_rec->table[1]; elems++) { 526 int disp = tab_rec->targets[elems]->offset - bx_offset; 527 if (cu_->verbose) { 528 LOG(INFO) << " Case[" << elems << "] disp: 0x" 529 << std::hex << disp; 530 } 531 Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset); 532 } 533 } 534 } 535} 536 537/* Write the fill array dta to the output stream */ 538void Mir2Lir::InstallFillArrayData() { 539 GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); 540 while (true) { 541 Mir2Lir::FillArrayData *tab_rec = iterator.Next(); 542 if (tab_rec == NULL) break; 543 AlignBuffer(code_buffer_, tab_rec->offset); 544 for (int i = 0; i < (tab_rec->size + 1) / 2; i++) { 545 code_buffer_.push_back(tab_rec->table[i] & 0xFF); 546 code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF); 547 } 548 } 549} 550 551static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) { 552 for (; lir != NULL; lir = lir->next) { 553 lir->offset = offset; 554 offset += 4; 555 } 556 return offset; 557} 558 559static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset, 560 unsigned int element_size) { 561 // Align to natural pointer size. 562 offset = RoundUp(offset, element_size); 563 for (; lir != NULL; lir = lir->next) { 564 lir->offset = offset; 565 offset += element_size; 566 } 567 return offset; 568} 569 570// Make sure we have a code address for every declared catch entry 571bool Mir2Lir::VerifyCatchEntries() { 572 MappingTable table(&encoded_mapping_table_[0]); 573 std::vector<uint32_t> dex_pcs; 574 dex_pcs.reserve(table.DexToPcSize()); 575 for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) { 576 dex_pcs.push_back(it.DexPc()); 577 } 578 // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_. 579 std::sort(dex_pcs.begin(), dex_pcs.end()); 580 581 bool success = true; 582 auto it = dex_pcs.begin(), end = dex_pcs.end(); 583 for (uint32_t dex_pc : mir_graph_->catches_) { 584 while (it != end && *it < dex_pc) { 585 LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it; 586 ++it; 587 success = false; 588 } 589 if (it == end || *it > dex_pc) { 590 LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc; 591 success = false; 592 } else { 593 ++it; 594 } 595 } 596 if (!success) { 597 LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 598 LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: " 599 << table.DexToPcSize(); 600 } 601 return success; 602} 603 604 605void Mir2Lir::CreateMappingTables() { 606 uint32_t pc2dex_data_size = 0u; 607 uint32_t pc2dex_entries = 0u; 608 uint32_t pc2dex_offset = 0u; 609 uint32_t pc2dex_dalvik_offset = 0u; 610 uint32_t dex2pc_data_size = 0u; 611 uint32_t dex2pc_entries = 0u; 612 uint32_t dex2pc_offset = 0u; 613 uint32_t dex2pc_dalvik_offset = 0u; 614 for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) { 615 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) { 616 pc2dex_entries += 1; 617 DCHECK(pc2dex_offset <= tgt_lir->offset); 618 pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset); 619 pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) - 620 static_cast<int32_t>(pc2dex_dalvik_offset)); 621 pc2dex_offset = tgt_lir->offset; 622 pc2dex_dalvik_offset = tgt_lir->dalvik_offset; 623 } 624 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) { 625 dex2pc_entries += 1; 626 DCHECK(dex2pc_offset <= tgt_lir->offset); 627 dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset); 628 dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) - 629 static_cast<int32_t>(dex2pc_dalvik_offset)); 630 dex2pc_offset = tgt_lir->offset; 631 dex2pc_dalvik_offset = tgt_lir->dalvik_offset; 632 } 633 } 634 635 uint32_t total_entries = pc2dex_entries + dex2pc_entries; 636 uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries); 637 uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size; 638 encoded_mapping_table_.resize(data_size); 639 uint8_t* write_pos = &encoded_mapping_table_[0]; 640 write_pos = EncodeUnsignedLeb128(write_pos, total_entries); 641 write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries); 642 DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size); 643 uint8_t* write_pos2 = write_pos + pc2dex_data_size; 644 645 pc2dex_offset = 0u; 646 pc2dex_dalvik_offset = 0u; 647 dex2pc_offset = 0u; 648 dex2pc_dalvik_offset = 0u; 649 for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) { 650 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) { 651 DCHECK(pc2dex_offset <= tgt_lir->offset); 652 write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset); 653 write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) - 654 static_cast<int32_t>(pc2dex_dalvik_offset)); 655 pc2dex_offset = tgt_lir->offset; 656 pc2dex_dalvik_offset = tgt_lir->dalvik_offset; 657 } 658 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) { 659 DCHECK(dex2pc_offset <= tgt_lir->offset); 660 write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset); 661 write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) - 662 static_cast<int32_t>(dex2pc_dalvik_offset)); 663 dex2pc_offset = tgt_lir->offset; 664 dex2pc_dalvik_offset = tgt_lir->dalvik_offset; 665 } 666 } 667 DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), 668 hdr_data_size + pc2dex_data_size); 669 DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size); 670 671 if (kIsDebugBuild) { 672 CHECK(VerifyCatchEntries()); 673 674 // Verify the encoded table holds the expected data. 675 MappingTable table(&encoded_mapping_table_[0]); 676 CHECK_EQ(table.TotalSize(), total_entries); 677 CHECK_EQ(table.PcToDexSize(), pc2dex_entries); 678 auto it = table.PcToDexBegin(); 679 auto it2 = table.DexToPcBegin(); 680 for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) { 681 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) { 682 CHECK_EQ(tgt_lir->offset, it.NativePcOffset()); 683 CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc()); 684 ++it; 685 } 686 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) { 687 CHECK_EQ(tgt_lir->offset, it2.NativePcOffset()); 688 CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc()); 689 ++it2; 690 } 691 } 692 CHECK(it == table.PcToDexEnd()); 693 CHECK(it2 == table.DexToPcEnd()); 694 } 695} 696 697void Mir2Lir::CreateNativeGcMap() { 698 DCHECK(!encoded_mapping_table_.empty()); 699 MappingTable mapping_table(&encoded_mapping_table_[0]); 700 uint32_t max_native_offset = 0; 701 for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) { 702 uint32_t native_offset = it.NativePcOffset(); 703 if (native_offset > max_native_offset) { 704 max_native_offset = native_offset; 705 } 706 } 707 MethodReference method_ref(cu_->dex_file, cu_->method_idx); 708 const std::vector<uint8_t>& gc_map_raw = 709 mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap(); 710 verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]); 711 DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize()); 712 // Compute native offset to references size. 713 GcMapBuilder native_gc_map_builder(&native_gc_map_, 714 mapping_table.PcToDexSize(), 715 max_native_offset, dex_gc_map.RegWidth()); 716 717 for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) { 718 uint32_t native_offset = it.NativePcOffset(); 719 uint32_t dex_pc = it.DexPc(); 720 const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false); 721 CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc << 722 ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 723 native_gc_map_builder.AddEntry(native_offset, references); 724 } 725} 726 727/* Determine the offset of each literal field */ 728int Mir2Lir::AssignLiteralOffset(CodeOffset offset) { 729 offset = AssignLiteralOffsetCommon(literal_list_, offset); 730 unsigned int ptr_size = GetInstructionSetPointerSize(cu_->instruction_set); 731 offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size); 732 offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size); 733 offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size); 734 return offset; 735} 736 737int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) { 738 GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); 739 while (true) { 740 Mir2Lir::SwitchTable* tab_rec = iterator.Next(); 741 if (tab_rec == NULL) break; 742 tab_rec->offset = offset; 743 if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 744 offset += tab_rec->table[1] * (sizeof(int) * 2); 745 } else { 746 DCHECK_EQ(static_cast<int>(tab_rec->table[0]), 747 static_cast<int>(Instruction::kPackedSwitchSignature)); 748 offset += tab_rec->table[1] * sizeof(int); 749 } 750 } 751 return offset; 752} 753 754int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) { 755 GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); 756 while (true) { 757 Mir2Lir::FillArrayData *tab_rec = iterator.Next(); 758 if (tab_rec == NULL) break; 759 tab_rec->offset = offset; 760 offset += tab_rec->size; 761 // word align 762 offset = RoundUp(offset, 4); 763 } 764 return offset; 765} 766 767/* 768 * Insert a kPseudoCaseLabel at the beginning of the Dalvik 769 * offset vaddr if pretty-printing, otherise use the standard block 770 * label. The selected label will be used to fix up the case 771 * branch table during the assembly phase. All resource flags 772 * are set to prevent code motion. KeyVal is just there for debugging. 773 */ 774LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) { 775 LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id]; 776 LIR* res = boundary_lir; 777 if (cu_->verbose) { 778 // Only pay the expense if we're pretty-printing. 779 LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR)); 780 new_label->dalvik_offset = vaddr; 781 new_label->opcode = kPseudoCaseLabel; 782 new_label->operands[0] = keyVal; 783 new_label->flags.fixup = kFixupLabel; 784 DCHECK(!new_label->flags.use_def_invalid); 785 new_label->u.m.def_mask = ENCODE_ALL; 786 InsertLIRAfter(boundary_lir, new_label); 787 res = new_label; 788 } 789 return res; 790} 791 792void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) { 793 const uint16_t* table = tab_rec->table; 794 DexOffset base_vaddr = tab_rec->vaddr; 795 const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]); 796 int entries = table[1]; 797 int low_key = s4FromSwitchData(&table[2]); 798 for (int i = 0; i < entries; i++) { 799 tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key); 800 } 801} 802 803void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) { 804 const uint16_t* table = tab_rec->table; 805 DexOffset base_vaddr = tab_rec->vaddr; 806 int entries = table[1]; 807 const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]); 808 const int32_t* targets = &keys[entries]; 809 for (int i = 0; i < entries; i++) { 810 tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]); 811 } 812} 813 814void Mir2Lir::ProcessSwitchTables() { 815 GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); 816 while (true) { 817 Mir2Lir::SwitchTable *tab_rec = iterator.Next(); 818 if (tab_rec == NULL) break; 819 if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) { 820 MarkPackedCaseLabels(tab_rec); 821 } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 822 MarkSparseCaseLabels(tab_rec); 823 } else { 824 LOG(FATAL) << "Invalid switch table"; 825 } 826 } 827} 828 829void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) { 830 /* 831 * Sparse switch data format: 832 * ushort ident = 0x0200 magic value 833 * ushort size number of entries in the table; > 0 834 * int keys[size] keys, sorted low-to-high; 32-bit aligned 835 * int targets[size] branch targets, relative to switch opcode 836 * 837 * Total size is (2+size*4) 16-bit code units. 838 */ 839 uint16_t ident = table[0]; 840 int entries = table[1]; 841 const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]); 842 const int32_t* targets = &keys[entries]; 843 LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident 844 << ", entries: " << std::dec << entries; 845 for (int i = 0; i < entries; i++) { 846 LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i]; 847 } 848} 849 850void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) { 851 /* 852 * Packed switch data format: 853 * ushort ident = 0x0100 magic value 854 * ushort size number of entries in the table 855 * int first_key first (and lowest) switch case value 856 * int targets[size] branch targets, relative to switch opcode 857 * 858 * Total size is (4+size*2) 16-bit code units. 859 */ 860 uint16_t ident = table[0]; 861 const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]); 862 int entries = table[1]; 863 int low_key = s4FromSwitchData(&table[2]); 864 LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident 865 << ", entries: " << std::dec << entries << ", low_key: " << low_key; 866 for (int i = 0; i < entries; i++) { 867 LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex 868 << targets[i]; 869 } 870} 871 872/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */ 873void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) { 874 // NOTE: only used for debug listings. 875 NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str))); 876} 877 878bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) { 879 bool is_taken; 880 switch (opcode) { 881 case Instruction::IF_EQ: is_taken = (src1 == src2); break; 882 case Instruction::IF_NE: is_taken = (src1 != src2); break; 883 case Instruction::IF_LT: is_taken = (src1 < src2); break; 884 case Instruction::IF_GE: is_taken = (src1 >= src2); break; 885 case Instruction::IF_GT: is_taken = (src1 > src2); break; 886 case Instruction::IF_LE: is_taken = (src1 <= src2); break; 887 case Instruction::IF_EQZ: is_taken = (src1 == 0); break; 888 case Instruction::IF_NEZ: is_taken = (src1 != 0); break; 889 case Instruction::IF_LTZ: is_taken = (src1 < 0); break; 890 case Instruction::IF_GEZ: is_taken = (src1 >= 0); break; 891 case Instruction::IF_GTZ: is_taken = (src1 > 0); break; 892 case Instruction::IF_LEZ: is_taken = (src1 <= 0); break; 893 default: 894 LOG(FATAL) << "Unexpected opcode " << opcode; 895 is_taken = false; 896 } 897 return is_taken; 898} 899 900// Convert relation of src1/src2 to src2/src1 901ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) { 902 ConditionCode res; 903 switch (before) { 904 case kCondEq: res = kCondEq; break; 905 case kCondNe: res = kCondNe; break; 906 case kCondLt: res = kCondGt; break; 907 case kCondGt: res = kCondLt; break; 908 case kCondLe: res = kCondGe; break; 909 case kCondGe: res = kCondLe; break; 910 default: 911 res = static_cast<ConditionCode>(0); 912 LOG(FATAL) << "Unexpected ccode " << before; 913 } 914 return res; 915} 916 917ConditionCode Mir2Lir::NegateComparison(ConditionCode before) { 918 ConditionCode res; 919 switch (before) { 920 case kCondEq: res = kCondNe; break; 921 case kCondNe: res = kCondEq; break; 922 case kCondLt: res = kCondGe; break; 923 case kCondGt: res = kCondLe; break; 924 case kCondLe: res = kCondGt; break; 925 case kCondGe: res = kCondLt; break; 926 default: 927 res = static_cast<ConditionCode>(0); 928 LOG(FATAL) << "Unexpected ccode " << before; 929 } 930 return res; 931} 932 933// TODO: move to mir_to_lir.cc 934Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 935 : Backend(arena), 936 literal_list_(NULL), 937 method_literal_list_(NULL), 938 class_literal_list_(NULL), 939 code_literal_list_(NULL), 940 first_fixup_(NULL), 941 cu_(cu), 942 mir_graph_(mir_graph), 943 switch_tables_(arena, 4, kGrowableArraySwitchTables), 944 fill_array_data_(arena, 4, kGrowableArrayFillArrayData), 945 tempreg_info_(arena, 20, kGrowableArrayMisc), 946 reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc), 947 pointer_storage_(arena, 128, kGrowableArrayMisc), 948 data_offset_(0), 949 total_size_(0), 950 block_label_list_(NULL), 951 promotion_map_(NULL), 952 current_dalvik_offset_(0), 953 estimated_native_code_size_(0), 954 reg_pool_(NULL), 955 live_sreg_(0), 956 num_core_spills_(0), 957 num_fp_spills_(0), 958 frame_size_(0), 959 core_spill_mask_(0), 960 fp_spill_mask_(0), 961 first_lir_insn_(NULL), 962 last_lir_insn_(NULL), 963 slow_paths_(arena, 32, kGrowableArraySlowPaths) { 964 // Reserve pointer id 0 for NULL. 965 size_t null_idx = WrapPointer(NULL); 966 DCHECK_EQ(null_idx, 0U); 967} 968 969void Mir2Lir::Materialize() { 970 cu_->NewTimingSplit("RegisterAllocation"); 971 CompilerInitializeRegAlloc(); // Needs to happen after SSA naming 972 973 /* Allocate Registers using simple local allocation scheme */ 974 SimpleRegAlloc(); 975 976 /* First try the custom light codegen for special cases. */ 977 DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr); 978 bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file) 979 ->GenSpecial(this, cu_->method_idx); 980 981 /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. */ 982 if (special_worked == false) { 983 MethodMIR2LIR(); 984 } 985 986 /* Method is not empty */ 987 if (first_lir_insn_) { 988 // mark the targets of switch statement case labels 989 ProcessSwitchTables(); 990 991 /* Convert LIR into machine code. */ 992 AssembleLIR(); 993 994 if (cu_->verbose) { 995 CodegenDump(); 996 } 997 } 998} 999 1000CompiledMethod* Mir2Lir::GetCompiledMethod() { 1001 // Combine vmap tables - core regs, then fp regs - into vmap_table. 1002 Leb128EncodingVector vmap_encoder; 1003 if (frame_size_ > 0) { 1004 // Prefix the encoded data with its size. 1005 size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size(); 1006 vmap_encoder.Reserve(size + 1u); // All values are likely to be one byte in ULEB128 (<128). 1007 vmap_encoder.PushBackUnsigned(size); 1008 // Core regs may have been inserted out of order - sort first. 1009 std::sort(core_vmap_table_.begin(), core_vmap_table_.end()); 1010 for (size_t i = 0 ; i < core_vmap_table_.size(); ++i) { 1011 // Copy, stripping out the phys register sort key. 1012 vmap_encoder.PushBackUnsigned( 1013 ~(-1 << VREG_NUM_WIDTH) & (core_vmap_table_[i] + VmapTable::kEntryAdjustment)); 1014 } 1015 // Push a marker to take place of lr. 1016 vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker); 1017 // fp regs already sorted. 1018 for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) { 1019 vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment); 1020 } 1021 } else { 1022 DCHECK_EQ(POPCOUNT(core_spill_mask_), 0); 1023 DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0); 1024 DCHECK_EQ(core_vmap_table_.size(), 0u); 1025 DCHECK_EQ(fp_vmap_table_.size(), 0u); 1026 vmap_encoder.PushBackUnsigned(0u); // Size is 0. 1027 } 1028 1029 UniquePtr<std::vector<uint8_t> > cfi_info(ReturnCallFrameInformation()); 1030 CompiledMethod* result = 1031 new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_, 1032 core_spill_mask_, fp_spill_mask_, encoded_mapping_table_, 1033 vmap_encoder.GetData(), native_gc_map_, cfi_info.get()); 1034 return result; 1035} 1036 1037size_t Mir2Lir::GetMaxPossibleCompilerTemps() const { 1038 // Chose a reasonably small value in order to contain stack growth. 1039 // Backends that are smarter about spill region can return larger values. 1040 const size_t max_compiler_temps = 10; 1041 return max_compiler_temps; 1042} 1043 1044size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() { 1045 // By default assume that the Mir2Lir will need one slot for each temporary. 1046 // If the backend can better determine temps that have non-overlapping ranges and 1047 // temps that do not need spilled, it can actually provide a small region. 1048 return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t)); 1049} 1050 1051int Mir2Lir::ComputeFrameSize() { 1052 /* Figure out the frame size */ 1053 uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set) 1054 + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set) 1055 + sizeof(uint32_t) // Filler. 1056 + (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t) 1057 + GetNumBytesForCompilerTempSpillRegion(); 1058 /* Align and set */ 1059 return RoundUp(size, kStackAlignment); 1060} 1061 1062/* 1063 * Append an LIR instruction to the LIR list maintained by a compilation 1064 * unit 1065 */ 1066void Mir2Lir::AppendLIR(LIR* lir) { 1067 if (first_lir_insn_ == NULL) { 1068 DCHECK(last_lir_insn_ == NULL); 1069 last_lir_insn_ = first_lir_insn_ = lir; 1070 lir->prev = lir->next = NULL; 1071 } else { 1072 last_lir_insn_->next = lir; 1073 lir->prev = last_lir_insn_; 1074 lir->next = NULL; 1075 last_lir_insn_ = lir; 1076 } 1077} 1078 1079/* 1080 * Insert an LIR instruction before the current instruction, which cannot be the 1081 * first instruction. 1082 * 1083 * prev_lir <-> new_lir <-> current_lir 1084 */ 1085void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) { 1086 DCHECK(current_lir->prev != NULL); 1087 LIR *prev_lir = current_lir->prev; 1088 1089 prev_lir->next = new_lir; 1090 new_lir->prev = prev_lir; 1091 new_lir->next = current_lir; 1092 current_lir->prev = new_lir; 1093} 1094 1095/* 1096 * Insert an LIR instruction after the current instruction, which cannot be the 1097 * first instruction. 1098 * 1099 * current_lir -> new_lir -> old_next 1100 */ 1101void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) { 1102 new_lir->prev = current_lir; 1103 new_lir->next = current_lir->next; 1104 current_lir->next = new_lir; 1105 new_lir->next->prev = new_lir; 1106} 1107 1108bool Mir2Lir::IsPowerOfTwo(uint64_t x) { 1109 return (x & (x - 1)) == 0; 1110} 1111 1112// Returns the index of the lowest set bit in 'x'. 1113int32_t Mir2Lir::LowestSetBit(uint64_t x) { 1114 int bit_posn = 0; 1115 while ((x & 0xf) == 0) { 1116 bit_posn += 4; 1117 x >>= 4; 1118 } 1119 while ((x & 1) == 0) { 1120 bit_posn++; 1121 x >>= 1; 1122 } 1123 return bit_posn; 1124} 1125 1126bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) { 1127 DCHECK(rl_src.wide); 1128 DCHECK(rl_dest.wide); 1129 return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1); 1130} 1131 1132LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, 1133 int offset, int check_value, LIR* target) { 1134 // Handle this for architectures that can't compare to memory. 1135 Load32Disp(base_reg, offset, temp_reg); 1136 LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target); 1137 return branch; 1138} 1139 1140void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) { 1141 slow_paths_.Insert(slowpath); 1142} 1143 1144void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type, 1145 SpecialTargetRegister symbolic_reg) { 1146 int target_method_idx = target_method.dex_method_index; 1147 LIR* data_target = ScanLiteralPool(code_literal_list_, target_method_idx, 0); 1148 if (data_target == NULL) { 1149 data_target = AddWordData(&code_literal_list_, target_method_idx); 1150 data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file)); 1151 data_target->operands[2] = type; 1152 } 1153 LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target); 1154 AppendLIR(load_pc_rel); 1155 DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target); 1156} 1157 1158void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type, 1159 SpecialTargetRegister symbolic_reg) { 1160 int target_method_idx = target_method.dex_method_index; 1161 LIR* data_target = ScanLiteralPool(method_literal_list_, target_method_idx, 0); 1162 if (data_target == NULL) { 1163 data_target = AddWordData(&method_literal_list_, target_method_idx); 1164 data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file)); 1165 data_target->operands[2] = type; 1166 } 1167 LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target); 1168 AppendLIR(load_pc_rel); 1169 DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target); 1170} 1171 1172void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) { 1173 // Use the literal pool and a PC-relative load from a data word. 1174 LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0); 1175 if (data_target == nullptr) { 1176 data_target = AddWordData(&class_literal_list_, type_idx); 1177 } 1178 LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target); 1179 AppendLIR(load_pc_rel); 1180} 1181 1182std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() { 1183 // Default case is to do nothing. 1184 return nullptr; 1185} 1186 1187RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) { 1188 loc.wide = false; 1189 if (loc.location == kLocPhysReg) { 1190 if (loc.reg.IsPair()) { 1191 loc.reg = loc.reg.GetLow(); 1192 } else { 1193 // FIXME: temp workaround. 1194 // Issue here: how do we narrow to a 32-bit value in 64-bit container? 1195 // Probably the wrong thing to narrow the RegStorage container here. That 1196 // should be a target decision. At the RegLocation level, we're only 1197 // modifying the view of the Dalvik value - this is orthogonal to the storage 1198 // container size. Consider this a temp workaround. 1199 DCHECK(loc.reg.IsDouble()); 1200 loc.reg = loc.reg.DoubleToLowSingle(); 1201 } 1202 } 1203 return loc; 1204} 1205 1206} // namespace art 1207