codegen_util.cc revision f29a4244bbc278843237f0ae242de077e093b580
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc_map_builder.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "vmap_table.h"

namespace art {

namespace {

/* Dump a mapping table */
template <typename It>
void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
                      const Signature& signature, uint32_t size, It first) {
  if (size != 0) {
    std::string line(StringPrintf("\n %s %s%s_%s_table[%u] = {", table_name,
                     descriptor, name, signature.ToString().c_str(), size));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i != size; ++i) {
      line = StringPrintf(" {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
      ++first;
      LOG(INFO) << line;
    }
    LOG(INFO) << " };\n\n";
  }
}

}  // anonymous namespace

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

void Mir2Lir::MarkSafepointPC(LIR* inst) {
  DCHECK(!inst->flags.use_def_invalid);
  inst->u.m.def_mask = ENCODE_ALL;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}
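
/*
 * Note: the kPseudoSafepointPC labels emitted by MarkSafepointPC() above are
 * what later drive the pc2dex entries in CreateMappingTables(); each safepoint
 * becomes one (native offset, dex pc) pair in the encoded mapping table.
 */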

/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
  if (UNLIKELY(lir == first_lir_insn_)) {
    first_lir_insn_ = lir->next;
    if (lir->next != NULL) {
      lir->next->prev = NULL;
    } else {
      DCHECK(lir->next == NULL);
      DCHECK(lir == last_lir_insn_);
      last_lir_insn_ = NULL;
    }
  } else if (lir == last_lir_insn_) {
    last_lir_insn_ = lir->prev;
    lir->prev->next = NULL;
  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
    lir->prev->next = lir->next;
    lir->next->prev = lir->prev;
  }
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
  lir->flags.is_nop = true;
  if (!cu_->verbose) {
    UnlinkLIR(lir);
  }
}

void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
  uint64_t* mask_ptr;
  uint64_t mask = ENCODE_MEM;
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  DCHECK(!lir->flags.use_def_invalid);
  if (is_load) {
    mask_ptr = &lir->u.m.use_mask;
  } else {
    mask_ptr = &lir->u.m.def_mask;
  }
  /* Clear out the memref flags */
  *mask_ptr &= ~mask;
  /* ...and then add back the one we need */
  switch (mem_type) {
    case kLiteral:
      DCHECK(is_load);
      *mask_ptr |= ENCODE_LITERAL;
      break;
    case kDalvikReg:
      *mask_ptr |= ENCODE_DALVIK_REG;
      break;
    case kHeapRef:
      *mask_ptr |= ENCODE_HEAP_REF;
      break;
    case kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit) {
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}
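
/*
 * For example, a 64-bit load of the Dalvik register pair (v4, v5) would
 * typically be annotated by AnnotateDalvikRegAccess() above with reg_id 4
 * (the low register) and the wide bit set; later passes can consult
 * alias_info when deciding whether two Dalvik-register accesses might refer
 * to the same stack slot.
 */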

/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        // NOTE: only used for debug listings.
        lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ "
                << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec << lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode), lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode), lir, base_addr));
        LOG(INFO) << StringPrintf("%5p: %-9s%s%s",
                                  base_addr + offset,
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.use_mask, "use"));
  }
  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.def_mask, "def"));
  }
}

void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", RegStorage::RegNum(v_reg_map.FpReg));
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ? "r" : "SP+",
                              v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}

void Mir2Lir::UpdateLIROffsets() {
  // Only used for code listings.
  size_t offset = 0;
  for (LIR* lir = first_lir_insn_; lir != nullptr; lir = lir->next) {
    lir->offset = offset;
    if (!lir->flags.is_nop && !IsPseudoLirOp(lir->opcode)) {
      offset += GetInsnSize(lir);
    } else if (lir->opcode == kPseudoPseudoAlign4) {
      offset += (offset & 0x2);
    }
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins : " << cu_->num_ins;
  LOG(INFO) << "Outs : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills : " << num_core_spills_;
  LOG(INFO) << "FPSpills : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps : " << mir_graph_->GetNumUsedCompilerTemps();
  LOG(INFO) << "Frame size : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
      " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  UpdateLIROffsets();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
  const char* name = cu_->dex_file->GetMethodName(method_id);
  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  if (!encoded_mapping_table_.empty()) {
    MappingTable table(&encoded_mapping_table_[0]);
    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
                     table.PcToDexSize(), table.PcToDexBegin());
    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
                     table.DexToPcSize(), table.DexToPcBegin());
  }
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within a specified delta (greater than or equal to 0).
 */
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
  while (data_target) {
    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}
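
/*
 * Note on ordering: ScanLiteralPoolWide() expects the low word of a wide
 * constant to appear in the list immediately before its high word. That is
 * exactly what AddWideData() below produces, since AddWordData() prepends to
 * the list and the high word is added first.
 */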

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR** constant_list_p, int value) {
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    estimated_native_code_size_ += sizeof(value);
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR** constant_list_p, int val_lo, int val_hi) {
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}

static void Push32(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems.
static void PushPointer(std::vector<uint8_t>& buf, const void* pointer, bool target64) {
  uint64_t data = reinterpret_cast<uintptr_t>(pointer);
  if (target64) {
    Push32(buf, data & 0xFFFFFFFF);
    Push32(buf, (data >> 32) & 0xFFFFFFFF);
  } else {
    Push32(buf, static_cast<uint32_t>(data));
  }
}

static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}

/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    Push32(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target_method_idx,
                                       target_dex_file,
                                       static_cast<InvokeType>(data_lir->operands[2]),
                                       code_buffer_.size());
    const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    // Unique value based on target to ensure code deduplication works.
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target_method_idx,
                                         target_dex_file,
                                         static_cast<InvokeType>(data_lir->operands[2]),
                                         code_buffer_.size());
    const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    // Unique value based on target to ensure code deduplication works.
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push class literals.
  data_lir = class_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    cu_->compiler_driver->AddClassPatch(cu_->dex_file,
                                        cu_->class_def_idx,
                                        cu_->method_idx,
                                        target_method_idx,
                                        code_buffer_.size());
    const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx);
    // Unique value based on target to ensure code deduplication works.
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
}

/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance. For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        DCHECK(tab_rec->anchor->flags.fixup != kFixupNone);
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
      case kX86_64:
        bx_offset = 0;
        break;
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << " Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        Push32(code_buffer_, keys[elems]);
        Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << " Case[" << elems << "] disp: 0x" << std::hex << disp;
        }
        Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
      }
    }
  }
}

/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}

static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += 4;
  }
  return offset;
}

static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
                                            unsigned int element_size) {
  // Align to natural pointer size.
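  // For example, with element_size == 8 an incoming offset of 6 rounds up to 8,
  // while an already-aligned offset is left unchanged.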
  offset = (offset + (element_size - 1)) & ~(element_size - 1);
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += element_size;
  }
  return offset;
}

// Make sure we have a code address for every declared catch entry.
bool Mir2Lir::VerifyCatchEntries() {
  MappingTable table(&encoded_mapping_table_[0]);
  std::vector<uint32_t> dex_pcs;
  dex_pcs.reserve(table.DexToPcSize());
  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
    dex_pcs.push_back(it.DexPc());
  }
  // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_.
  std::sort(dex_pcs.begin(), dex_pcs.end());

  bool success = true;
  auto it = dex_pcs.begin(), end = dex_pcs.end();
  for (uint32_t dex_pc : mir_graph_->catches_) {
    while (it != end && *it < dex_pc) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
      ++it;
      success = false;
    }
    if (it == end || *it > dex_pc) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    } else {
      ++it;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << table.DexToPcSize();
  }
  return success;
}


void Mir2Lir::CreateMappingTables() {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = 0u;
  uint32_t pc2dex_offset = 0u;
  uint32_t pc2dex_dalvik_offset = 0u;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  uint32_t dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_entries += 1;
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset);
      pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_entries += 1;
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  encoded_mapping_table_.resize(data_size);
  uint8_t* write_pos = &encoded_mapping_table_[0];
  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;
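  // Second pass: emit the deltas whose sizes were computed in the loop above.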
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                     static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                      static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
            hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);

  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());

    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
        ++it;
      }
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}
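
/*
 * The encoded mapping table produced above is, roughly:
 *   ULEB128 total_entries
 *   ULEB128 pc2dex_entries
 *   pc2dex entries:  (ULEB128 native-offset delta, SLEB128 dex-pc delta) each
 *   dex2pc entries:  (ULEB128 native-offset delta, SLEB128 dex-pc delta) each
 * Deltas are relative to the previous entry in the same sub-table, so a
 * method with safepoints at native offsets 0x10 and 0x14 for dex pcs 0x3 and
 * 0x5 would encode the pc2dex pairs as (0x10, +3) and (0x4, +2).
 */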

void Mir2Lir::CreateNativeGcMap() {
  DCHECK(!encoded_mapping_table_.empty());
  MappingTable mapping_table(&encoded_mapping_table_[0]);
  uint32_t max_native_offset = 0;
  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>& gc_map_raw =
      mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
  DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
  // Compute native offset to references size.
  GcMapBuilder native_gc_map_builder(&native_gc_map_,
                                     mapping_table.PcToDexSize(),
                                     max_native_offset, dex_gc_map.RegWidth());

  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    uint32_t dex_pc = it.DexPc();
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
        ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  unsigned int ptr_size = GetInstructionSetPointerSize(cu_->instruction_set);
  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
  return offset;
}

int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // Word align.
    offset = (offset + 3) & ~3;
  }
  return offset;
}

/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * offset vaddr if pretty-printing, otherwise use the standard block
 * label. The selected label will be used to fix up the case
 * branch table during the assembly phase. All resource flags
 * are set to prevent code motion. KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
  LIR* res = boundary_lir;
  if (cu_->verbose) {
    // Only pay the expense if we're pretty-printing.
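    // The extra label records the case key and dex offset so that the
    // kPseudoCaseLabel case in DumpLIRInsn() can show which case each
    // branch-table entry refers to.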
    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
    new_label->dalvik_offset = vaddr;
    new_label->opcode = kPseudoCaseLabel;
    new_label->operands[0] = keyVal;
    new_label->flags.fixup = kFixupLabel;
    DCHECK(!new_label->flags.use_def_invalid);
    new_label->u.m.def_mask = ENCODE_ALL;
    InsertLIRAfter(boundary_lir, new_label);
    res = new_label;
  }
  return res;
}

void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}
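
/*
 * For example, on a little-endian target a sparse switch with keys {1, 10}
 * and targets 0x20 and 0x30 is laid out (as 16-bit code units) as:
 *   0x0200, 0x0002, 0x0001, 0x0000, 0x000a, 0x0000, 0x0020, 0x0000, 0x0030, 0x0000
 * i.e. the magic value, the entry count, the two 32-bit keys, then the two
 * 32-bit branch targets.
 */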
858 */ 859 uint16_t ident = table[0]; 860 const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]); 861 int entries = table[1]; 862 int low_key = s4FromSwitchData(&table[2]); 863 LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident 864 << ", entries: " << std::dec << entries << ", low_key: " << low_key; 865 for (int i = 0; i < entries; i++) { 866 LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex 867 << targets[i]; 868 } 869} 870 871/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */ 872void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) { 873 // NOTE: only used for debug listings. 874 NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str))); 875} 876 877bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) { 878 bool is_taken; 879 switch (opcode) { 880 case Instruction::IF_EQ: is_taken = (src1 == src2); break; 881 case Instruction::IF_NE: is_taken = (src1 != src2); break; 882 case Instruction::IF_LT: is_taken = (src1 < src2); break; 883 case Instruction::IF_GE: is_taken = (src1 >= src2); break; 884 case Instruction::IF_GT: is_taken = (src1 > src2); break; 885 case Instruction::IF_LE: is_taken = (src1 <= src2); break; 886 case Instruction::IF_EQZ: is_taken = (src1 == 0); break; 887 case Instruction::IF_NEZ: is_taken = (src1 != 0); break; 888 case Instruction::IF_LTZ: is_taken = (src1 < 0); break; 889 case Instruction::IF_GEZ: is_taken = (src1 >= 0); break; 890 case Instruction::IF_GTZ: is_taken = (src1 > 0); break; 891 case Instruction::IF_LEZ: is_taken = (src1 <= 0); break; 892 default: 893 LOG(FATAL) << "Unexpected opcode " << opcode; 894 is_taken = false; 895 } 896 return is_taken; 897} 898 899// Convert relation of src1/src2 to src2/src1 900ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) { 901 ConditionCode res; 902 switch (before) { 903 case kCondEq: res = kCondEq; break; 904 case kCondNe: res = kCondNe; break; 905 case kCondLt: res = kCondGt; break; 906 case kCondGt: res = kCondLt; break; 907 case kCondLe: res = kCondGe; break; 908 case kCondGe: res = kCondLe; break; 909 default: 910 res = static_cast<ConditionCode>(0); 911 LOG(FATAL) << "Unexpected ccode " << before; 912 } 913 return res; 914} 915 916ConditionCode Mir2Lir::NegateComparison(ConditionCode before) { 917 ConditionCode res; 918 switch (before) { 919 case kCondEq: res = kCondNe; break; 920 case kCondNe: res = kCondEq; break; 921 case kCondLt: res = kCondGe; break; 922 case kCondGt: res = kCondLe; break; 923 case kCondLe: res = kCondGt; break; 924 case kCondGe: res = kCondLt; break; 925 default: 926 res = static_cast<ConditionCode>(0); 927 LOG(FATAL) << "Unexpected ccode " << before; 928 } 929 return res; 930} 931 932// TODO: move to mir_to_lir.cc 933Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 934 : Backend(arena), 935 literal_list_(NULL), 936 method_literal_list_(NULL), 937 class_literal_list_(NULL), 938 code_literal_list_(NULL), 939 first_fixup_(NULL), 940 cu_(cu), 941 mir_graph_(mir_graph), 942 switch_tables_(arena, 4, kGrowableArraySwitchTables), 943 fill_array_data_(arena, 4, kGrowableArrayFillArrayData), 944 tempreg_info_(arena, 20, kGrowableArrayMisc), 945 reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc), 946 pointer_storage_(arena, 128, kGrowableArrayMisc), 947 data_offset_(0), 948 total_size_(0), 949 block_label_list_(NULL), 950 promotion_map_(NULL), 951 current_dalvik_offset_(0), 952 

// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      class_literal_list_(NULL),
      code_literal_list_(NULL),
      first_fixup_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      tempreg_info_(arena, 20, kGrowableArrayMisc),
      reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc),
      pointer_storage_(arena, 128, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      promotion_map_(NULL),
      current_dalvik_offset_(0),
      estimated_native_code_size_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL),
      slow_paths_(arena, 32, kGrowableArraySlowPaths) {
  // Reserve pointer id 0 for NULL.
  size_t null_idx = WrapPointer(NULL);
  DCHECK_EQ(null_idx, 0U);
}

void Mir2Lir::Materialize() {
  cu_->NewTimingSplit("RegisterAllocation");
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming.

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  /* First try the custom light codegen for special cases. */
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
      ->GenSpecial(this, cu_->method_idx);

  /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. */
  if (special_worked == false) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // Mark the targets of switch statement case labels.
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if (cu_->verbose) {
      CodegenDump();
    }
  }
}

CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table.
  Leb128EncodingVector vmap_encoder;
  if (frame_size_ > 0) {
    // Prefix the encoded data with its size.
    size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size();
    vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
    vmap_encoder.PushBackUnsigned(size);
    // Core regs may have been inserted out of order - sort first.
    std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
    for (size_t i = 0; i < core_vmap_table_.size(); ++i) {
      // Copy, stripping out the phys register sort key.
      vmap_encoder.PushBackUnsigned(
          ~(-1 << VREG_NUM_WIDTH) & (core_vmap_table_[i] + VmapTable::kEntryAdjustment));
    }
    // Push a marker to take place of lr.
    vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
    // fp regs already sorted.
    for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
      vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
    }
  } else {
    DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
    DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0);
    DCHECK_EQ(core_vmap_table_.size(), 0u);
    DCHECK_EQ(fp_vmap_table_.size(), 0u);
    vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
  }

  UniquePtr<std::vector<uint8_t> > cfi_info(ReturnCallFrameInformation());
  CompiledMethod* result =
      new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
                         vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
  return result;
}

size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
  // Choose a reasonably small value in order to contain stack growth.
  // Backends that are smarter about spill region can return larger values.
  const size_t max_compiler_temps = 10;
  return max_compiler_temps;
}

size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
  // By default assume that the Mir2Lir will need one slot for each temporary.
  // If the backend can better determine temps that have non-overlapping ranges and
  // temps that do not need to be spilled, it can actually provide a smaller region.
  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  static const uint32_t kAlignMask = kStackAlignment - 1;
  uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
                  + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
                  + sizeof(uint32_t)  // Filler.
                  + (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t)
                  + GetNumBytesForCompilerTempSpillRegion();
  /* Align and set */
  return (size + kAlignMask) & ~(kAlignMask);
}

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit.
 */
void Mir2Lir::AppendLIR(LIR* lir) {
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
  DCHECK(current_lir->prev != NULL);
  LIR* prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction.
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}

bool Mir2Lir::IsPowerOfTwo(uint64_t x) {
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
int32_t Mir2Lir::LowestSetBit(uint64_t x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) -
              mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

LIR* Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                int offset, int check_value, LIR* target) {
  // Handle this for architectures that can't compare to memory.
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
  slow_paths_.Insert(slowpath);
}

void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                              SpecialTargetRegister symbolic_reg) {
  int target_method_idx = target_method.dex_method_index;
  LIR* data_target = ScanLiteralPool(code_literal_list_, target_method_idx, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&code_literal_list_, target_method_idx);
    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
    data_target->operands[2] = type;
  }
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                SpecialTargetRegister symbolic_reg) {
  int target_method_idx = target_method.dex_method_index;
  LIR* data_target = ScanLiteralPool(method_literal_list_, target_method_idx, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&method_literal_list_, target_method_idx);
    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
    data_target->operands[2] = type;
  }
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  // Use the literal pool and a PC-relative load from a data word.
  LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
  if (data_target == nullptr) {
    data_target = AddWordData(&class_literal_list_, type_idx);
  }
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
}

std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
  // Default case is to do nothing.
  return nullptr;
}

RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
  loc.wide = false;
  if (loc.location == kLocPhysReg) {
    if (loc.reg.IsPair()) {
      loc.reg = loc.reg.GetLow();
    } else {
      // FIXME: temp workaround.
      // Issue here: how do we narrow to a 32-bit value in a 64-bit container?
      // Probably the wrong thing to narrow the RegStorage container here. That
      // should be a target decision. At the RegLocation level, we're only
      // modifying the view of the Dalvik value - this is orthogonal to the storage
      // container size. Consider this a temp workaround.
      DCHECK(loc.reg.IsDouble());
      loc.reg = loc.reg.DoubleToLowSingle();
    }
  }
  return loc;
}

}  // namespace art