codegen_util.cc revision 3bc8615332b7848dec8c2297a40f7e4d176c0efb
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "vmap_table.h"

namespace art {

namespace {

/* Dump a mapping table */
template <typename It>
void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
                      const Signature& signature, uint32_t size, It first) {
  if (size != 0) {
    std::string line(StringPrintf("\n %s %s%s_%s_table[%u] = {", table_name,
                     descriptor, name, signature.ToString().c_str(), size));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i != size; ++i) {
      line = StringPrintf(" {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
      ++first;
      LOG(INFO) << line;
    }
    LOG(INFO) << " };\n\n";
  }
}

}  // anonymous namespace

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

void Mir2Lir::MarkSafepointPC(LIR* inst) {
  DCHECK(!inst->flags.use_def_invalid);
  inst->u.m.def_mask = ENCODE_ALL;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}

/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
  if (UNLIKELY(lir == first_lir_insn_)) {
    first_lir_insn_ = lir->next;
    if (lir->next != NULL) {
      lir->next->prev = NULL;
    } else {
      DCHECK(lir->next == NULL);
      DCHECK(lir == last_lir_insn_);
      last_lir_insn_ = NULL;
    }
  } else if (lir == last_lir_insn_) {
    last_lir_insn_ = lir->prev;
    lir->prev->next = NULL;
  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
    lir->prev->next = lir->next;
    lir->next->prev = lir->prev;
  }
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
  lir->flags.is_nop = true;
  if (!cu_->verbose) {
    UnlinkLIR(lir);
  }
}

void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
  uint64_t* mask_ptr;
  uint64_t mask = ENCODE_MEM;
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  DCHECK(!lir->flags.use_def_invalid);
  if (is_load) {
    mask_ptr = &lir->u.m.use_mask;
  } else {
    mask_ptr = &lir->u.m.def_mask;
  }
  /* Clear out the memref flags */
  *mask_ptr &= ~mask;
  /* ..and then add back the one we need */
  switch (mem_type) {
    case kLiteral:
      DCHECK(is_load);
      *mask_ptr |= ENCODE_LITERAL;
      break;
    case kDalvikReg:
      *mask_ptr |= ENCODE_DALVIK_REG;
      break;
    case kHeapRef:
      *mask_ptr |= ENCODE_HEAP_REF;
      break;
    case kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}

/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        // NOTE: only used for debug listings.
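        // Substitute a placeholder when the boundary carries no recorded instruction string.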
        lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ "
                << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec
                << lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                            lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                lir, base_addr));
        LOG(INFO) << StringPrintf("%5p: %-9s%s%s",
                                  base_addr + offset,
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.use_mask, "use"));
  }
  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.def_mask, "def"));
  }
}

void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins : " << cu_->num_ins;
  LOG(INFO) << "Outs : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills : " << num_core_spills_;
  LOG(INFO) << "FPSpills : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps : " << mir_graph_->GetNumUsedCompilerTemps();
  LOG(INFO) << "Frame size : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
      " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
  const char* name = cu_->dex_file->GetMethodName(method_id);
  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  if (!encoded_mapping_table_.empty()) {
    MappingTable table(&encoded_mapping_table_[0]);
    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
                     table.PcToDexSize(), table.PcToDexBegin());
    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
                     table.DexToPcSize(), table.DexToPcBegin());
  }
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within specified delta (greater or equal to 0).
 */
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
  while (data_target) {
    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    estimated_native_code_size_ += sizeof(value);
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}

static void Push32(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems.
static void PushPointer(std::vector<uint8_t>& buf, const void* pointer, bool target64) {
  uint64_t data = reinterpret_cast<uintptr_t>(pointer);
  if (target64) {
    Push32(buf, data & 0xFFFFFFFF);
    Push32(buf, (data >> 32) & 0xFFFFFFFF);
  } else {
    Push32(buf, static_cast<uint32_t>(data));
  }
}

static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}

/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    Push32(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
                                       code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
                                         code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push class literals.
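  // As with code/method literals above: record a patch at the current buffer offset and
  // emit a placeholder pointer that the compiler driver will later rewrite.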
  data_lir = class_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddClassPatch(cu_->dex_file,
                                        cu_->class_def_idx,
                                        cu_->method_idx,
                                        target,
                                        code_buffer_.size());
    const DexFile::TypeId& id = cu_->dex_file->GetTypeId(target);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
}

/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance. For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        DCHECK(tab_rec->anchor->flags.fixup != kFixupNone);
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
        bx_offset = 0;
        break;
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << " Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        Push32(code_buffer_, keys[elems]);
        Push32(code_buffer_,
               tab_rec->targets[elems]->offset - bx_offset);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << " Case[" << elems << "] disp: 0x"
                    << std::hex << disp;
        }
        Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
      }
    }
  }
}

/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}

static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += 4;
  }
  return offset;
}

static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset) {
  unsigned int element_size = sizeof(void*);
  // Align to natural pointer size.
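  // (offset + (element_size - 1)) & ~(element_size - 1) rounds up to the next multiple
  // of element_size.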
  offset = (offset + (element_size - 1)) & ~(element_size - 1);
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += element_size;
  }
  return offset;
}

// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
  MappingTable table(&encoded_mapping_table_[0]);
  std::vector<uint32_t> dex_pcs;
  dex_pcs.reserve(table.DexToPcSize());
  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
    dex_pcs.push_back(it.DexPc());
  }
  // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_.
  std::sort(dex_pcs.begin(), dex_pcs.end());

  bool success = true;
  auto it = dex_pcs.begin(), end = dex_pcs.end();
  for (uint32_t dex_pc : mir_graph_->catches_) {
    while (it != end && *it < dex_pc) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
      ++it;
      success = false;
    }
    if (it == end || *it > dex_pc) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    } else {
      ++it;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << table.DexToPcSize();
  }
  return success;
}


void Mir2Lir::CreateMappingTables() {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = 0u;
  uint32_t pc2dex_offset = 0u;
  uint32_t pc2dex_dalvik_offset = 0u;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  uint32_t dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_entries += 1;
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset);
      pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_entries += 1;
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  encoded_mapping_table_.resize(data_size);
  uint8_t* write_pos = &encoded_mapping_table_[0];
  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_;
       tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                     static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                      static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
            hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);

  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());

    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
        ++it;
      }
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}

class NativePcToReferenceMapBuilder {
 public:
  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
                                size_t entries, uint32_t max_native_offset,
                                size_t references_width) : entries_(entries),
      references_width_(references_width), in_use_(entries),
      table_(table) {
    // Compute width in bytes needed to hold max_native_offset.
    native_offset_width_ = 0;
    while (max_native_offset != 0) {
      native_offset_width_++;
      max_native_offset >>= 8;
    }
    // Resize table and set up header.
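    // Header layout (4 bytes):
    //   byte 0, bits 0-2: native offset width in bytes
    //   byte 0, bits 3-7 and byte 1: reference bitmap width in bytes
    //   bytes 2-3: number of entries (little-endian)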
    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
    CHECK_LT(native_offset_width_, 1U << 3);
    (*table)[0] = native_offset_width_ & 7;
    CHECK_LT(references_width_, 1U << 13);
    (*table)[0] |= (references_width_ << 3) & 0xFF;
    (*table)[1] = (references_width_ >> 5) & 0xFF;
    CHECK_LT(entries, 1U << 16);
    (*table)[2] = entries & 0xFF;
    (*table)[3] = (entries >> 8) & 0xFF;
  }

  void AddEntry(uint32_t native_offset, const uint8_t* references) {
    size_t table_index = TableIndex(native_offset);
    while (in_use_[table_index]) {
      table_index = (table_index + 1) % entries_;
    }
    in_use_[table_index] = true;
    SetCodeOffset(table_index, native_offset);
    DCHECK_EQ(native_offset, GetCodeOffset(table_index));
    SetReferences(table_index, references);
  }

 private:
  size_t TableIndex(uint32_t native_offset) {
    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
  }

  uint32_t GetCodeOffset(size_t table_index) {
    uint32_t native_offset = 0;
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      native_offset |= (*table_)[table_offset + i] << (i * 8);
    }
    return native_offset;
  }

  void SetCodeOffset(size_t table_index, uint32_t native_offset) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
    }
  }

  void SetReferences(size_t table_index, const uint8_t* references) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
  }

  size_t EntryWidth() const {
    return native_offset_width_ + references_width_;
  }

  // Number of entries in the table.
  const size_t entries_;
  // Number of bytes used to encode the reference bitmap.
  const size_t references_width_;
  // Number of bytes used to encode a native offset.
  size_t native_offset_width_;
  // Entries that are in use.
  std::vector<bool> in_use_;
  // The table we're building.
  std::vector<uint8_t>* const table_;
};

void Mir2Lir::CreateNativeGcMap() {
  DCHECK(!encoded_mapping_table_.empty());
  MappingTable mapping_table(&encoded_mapping_table_[0]);
  uint32_t max_native_offset = 0;
  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>& gc_map_raw =
      mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
  DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
  // Compute native offset to references size.
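  // The builder allocates one hash slot per safepoint PC; entries are keyed by the hashed
  // native offset with linear probing.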
  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
                                                      mapping_table.PcToDexSize(),
                                                      max_native_offset, dex_gc_map.RegWidth());

  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    uint32_t dex_pc = it.DexPc();
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset);
  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset);
  offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset);
  return offset;
}

int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // word align
    offset = (offset + 3) & ~3;
  }
  return offset;
}

/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * offset vaddr if pretty-printing, otherwise use the standard block
 * label. The selected label will be used to fix up the case
 * branch table during the assembly phase. All resource flags
 * are set to prevent code motion. KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
  LIR* res = boundary_lir;
  if (cu_->verbose) {
    // Only pay the expense if we're pretty-printing.
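    // Allocate a dedicated case-label LIR so the case key shows up in the verbose listing.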
    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
    new_label->dalvik_offset = vaddr;
    new_label->opcode = kPseudoCaseLabel;
    new_label->operands[0] = keyVal;
    new_label->flags.fixup = kFixupLabel;
    DCHECK(!new_label->flags.use_def_invalid);
    new_label->u.m.def_mask = ENCODE_ALL;
    InsertLIRAfter(boundary_lir, new_label);
    res = new_label;
  }
  return res;
}

void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}

void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t ident = table[0];
  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex
              << targets[i];
  }
}

/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
  // NOTE: only used for debug listings.
  NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}

bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
  bool is_taken;
  switch (opcode) {
    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
    case Instruction::IF_NE: is_taken = (src1 != src2); break;
    case Instruction::IF_LT: is_taken = (src1 < src2); break;
    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
    case Instruction::IF_GT: is_taken = (src1 > src2); break;
    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
      is_taken = false;
  }
  return is_taken;
}

// Convert relation of src1/src2 to src2/src1
ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondEq; break;
    case kCondNe: res = kCondNe; break;
    case kCondLt: res = kCondGt; break;
    case kCondGt: res = kCondLt; break;
    case kCondLe: res = kCondGe; break;
    case kCondGe: res = kCondLe; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondNe; break;
    case kCondNe: res = kCondEq; break;
    case kCondLt: res = kCondGe; break;
    case kCondGt: res = kCondLe; break;
    case kCondLe: res = kCondGt; break;
    case kCondGe: res = kCondLt; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      class_literal_list_(NULL),
      code_literal_list_(NULL),
      first_fixup_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
      tempreg_info_(arena, 20, kGrowableArrayMisc),
      reginfo_map_(arena, 64, kGrowableArrayMisc),
      pointer_storage_(arena, 128, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      promotion_map_(NULL),
      current_dalvik_offset_(0),
      estimated_native_code_size_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL),
      slow_paths_(arena, 32, kGrowableArraySlowPaths) {
  // Reserve pointer id 0 for NULL.
  size_t null_idx = WrapPointer(NULL);
  DCHECK_EQ(null_idx, 0U);
}

void Mir2Lir::Materialize() {
  cu_->NewTimingSplit("RegisterAllocation");
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  /* First try the custom light codegen for special cases. */
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
      ->GenSpecial(this, cu_->method_idx);

  /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. */
  if (special_worked == false) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // mark the targets of switch statement case labels
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if (cu_->verbose) {
      CodegenDump();
    }
  }
}

CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table.
  Leb128EncodingVector vmap_encoder;
  if (frame_size_ > 0) {
    // Prefix the encoded data with its size.
    size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size();
    vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
    vmap_encoder.PushBackUnsigned(size);
    // Core regs may have been inserted out of order - sort first.
    std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
    for (size_t i = 0; i < core_vmap_table_.size(); ++i) {
      // Copy, stripping out the phys register sort key.
      vmap_encoder.PushBackUnsigned(
          ~(-1 << VREG_NUM_WIDTH) & (core_vmap_table_[i] + VmapTable::kEntryAdjustment));
    }
    // Push a marker to take place of lr.
    vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
    // fp regs already sorted.
    for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
      vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
    }
  } else {
    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
    DCHECK_EQ(core_vmap_table_.size(), 0u);
    DCHECK_EQ(fp_vmap_table_.size(), 0u);
    vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
  }

  UniquePtr<std::vector<uint8_t> > cfi_info(ReturnCallFrameInformation());
  CompiledMethod* result =
      new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
                         vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
  return result;
}

size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
  // Choose a reasonably small value in order to contain stack growth.
  // Backends that are smarter about spill region can return larger values.
  const size_t max_compiler_temps = 10;
  return max_compiler_temps;
}

size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
  // By default assume that the Mir2Lir will need one slot for each temporary.
  // If the backend can better determine temps that have non-overlapping ranges and
  // temps that do not need to be spilled, it can actually provide a smaller region.
  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  static const uint32_t kAlignMask = kStackAlignment - 1;
  uint32_t size = ((num_core_spills_ + num_fp_spills_ +
                    1 /* filler word */ + cu_->num_regs + cu_->num_outs)
                   * sizeof(uint32_t)) +
      GetNumBytesForCompilerTempSpillRegion();
  /* Align and set */
  return (size + kAlignMask) & ~(kAlignMask);
}

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit
 */
void Mir2Lir::AppendLIR(LIR* lir) {
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
  DCHECK(current_lir->prev != NULL);
  LIR* prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction.
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}

bool Mir2Lir::IsPowerOfTwo(uint64_t x) {
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
int32_t Mir2Lir::LowestSetBit(uint64_t x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

LIR* Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
                                int offset, int check_value, LIR* target) {
  // Handle this for architectures that can't compare to memory.
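  // Generic fallback: load the memory word into a temp register, then branch on an
  // immediate compare.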
  LoadWordDisp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
  slow_paths_.Insert(slowpath);
}

void Mir2Lir::LoadCodeAddress(int dex_method_index, InvokeType type, SpecialTargetRegister symbolic_reg) {
  LIR* data_target = ScanLiteralPool(code_literal_list_, dex_method_index, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&code_literal_list_, dex_method_index);
    data_target->operands[1] = type;
  }
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadMethodAddress(int dex_method_index, InvokeType type, SpecialTargetRegister symbolic_reg) {
  LIR* data_target = ScanLiteralPool(method_literal_list_, dex_method_index, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&method_literal_list_, dex_method_index);
    data_target->operands[1] = type;
  }
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  // Use the literal pool and a PC-relative load from a data word.
  LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
  if (data_target == nullptr) {
    data_target = AddWordData(&class_literal_list_, type_idx);
  }
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
}

std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
  // Default case is to do nothing.
  return nullptr;
}

}  // namespace art