codegen_util.cc revision 147eb41b53729ec8d5c188d1cac90964a51afb8a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc_map_builder.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "vmap_table.h"

namespace art {

namespace {

/* Dump a mapping table */
template <typename It>
void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
                      const Signature& signature, uint32_t size, It first) {
  if (size != 0) {
    std::string line(StringPrintf("\n %s %s%s_%s_table[%u] = {", table_name,
                                  descriptor, name, signature.ToString().c_str(), size));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i != size; ++i) {
      line = StringPrintf(" {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
      ++first;
      LOG(INFO) << line;
    }
    LOG(INFO) << " };\n\n";
  }
}

}  // anonymous namespace

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

void Mir2Lir::MarkSafepointPC(LIR* inst) {
  DCHECK(!inst->flags.use_def_invalid);
  inst->u.m.def_mask = &kEncodeAll;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
}

void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
  DCHECK(!after->flags.use_def_invalid);
  after->u.m.def_mask = &kEncodeAll;
  // As NewLIR0 uses Append, we need to create the LIR by hand.
  LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
  if (after->next == nullptr) {
    DCHECK_EQ(after, last_lir_insn_);
    AppendLIR(safepoint_pc);
  } else {
    InsertLIRAfter(after, safepoint_pc);
  }
  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
}
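// The LIR stream is a doubly linked list threaded through first_lir_insn_ and
// last_lir_insn_. UnlinkLIR below handles the head, tail, and interior cases
// separately, and emptying the list clears both pointers. Illustrative sketch:
// unlinking B from A <-> B <-> C relinks A <-> C; B's own prev/next pointers
// are left stale, which is harmless for a node that is being discarded.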
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
  if (UNLIKELY(lir == first_lir_insn_)) {
    first_lir_insn_ = lir->next;
    if (lir->next != NULL) {
      lir->next->prev = NULL;
    } else {
      DCHECK(lir->next == NULL);
      DCHECK(lir == last_lir_insn_);
      last_lir_insn_ = NULL;
    }
  } else if (lir == last_lir_insn_) {
    last_lir_insn_ = lir->prev;
    lir->prev->next = NULL;
  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
    lir->prev->next = lir->next;
    lir->next->prev = lir->prev;
  }
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
  lir->flags.is_nop = true;
  if (!cu_->verbose) {
    UnlinkLIR(lir);
  }
}

void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  DCHECK(!lir->flags.use_def_invalid);
  // TODO: Avoid the extra Arena allocation!
  const ResourceMask** mask_ptr;
  ResourceMask mask;
  if (is_load) {
    mask_ptr = &lir->u.m.use_mask;
  } else {
    mask_ptr = &lir->u.m.def_mask;
  }
  mask = **mask_ptr;
  /* Clear out the memref flags */
  mask.ClearBits(kEncodeMem);
  /* ..and then add back the one we need */
  switch (mem_type) {
    case ResourceMask::kLiteral:
      DCHECK(is_load);
      mask.SetBit(ResourceMask::kLiteral);
      break;
    case ResourceMask::kDalvikReg:
      mask.SetBit(ResourceMask::kDalvikReg);
      break;
    case ResourceMask::kHeapRef:
      mask.SetBit(ResourceMask::kHeapRef);
      break;
    case ResourceMask::kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      mask.SetBit(ResourceMask::kMustNotAlias);
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
  *mask_ptr = mask_cache_.GetMask(mask);
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  DCHECK((is_load ? lir->u.m.use_mask : lir->u.m.def_mask)->Intersection(kEncodeMem).Equals(
      kEncodeDalvikReg));

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}
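// Illustrative alias_info values, assuming ENCODE_ALIAS_INFO packs the Dalvik
// register id into the low bits and the 64-bit flag into the MSB as described
// above: a wide access to v5 would encode as 0x80000005, a narrow one as
// 0x00000005.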
/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        // NOTE: only used for debug listings.
        lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ "
                << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec
                << lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                            lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                lir, base_addr));
        LOG(INFO) << StringPrintf("%5p: %-9s%s%s",
                                  base_addr + offset,
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.use_mask, "use"));
  }
  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.def_mask, "def"));
  }
}

void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", RegStorage::RegNum(v_reg_map.fp_reg));
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}
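// Sample DumpPromotionMap output (illustrative): "V[03] -> r5 : s2" means
// Dalvik v3 is promoted to core register r5 and to float register s2, while
// "V[04] -> SP+16" means v4 was not core-promoted and is addressed at frame
// offset 16.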
void Mir2Lir::UpdateLIROffsets() {
  // Only used for code listings.
  size_t offset = 0;
  for (LIR* lir = first_lir_insn_; lir != nullptr; lir = lir->next) {
    lir->offset = offset;
    if (!lir->flags.is_nop && !IsPseudoLirOp(lir->opcode)) {
      offset += GetInsnSize(lir);
    } else if (lir->opcode == kPseudoPseudoAlign4) {
      offset += (offset & 0x2);
    }
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins : " << cu_->num_ins;
  LOG(INFO) << "Outs : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills : " << num_core_spills_;
  LOG(INFO) << "FPSpills : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps : " << mir_graph_->GetNumUsedCompilerTemps();
  LOG(INFO) << "Frame size : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
      " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  UpdateLIROffsets();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
  const char* name = cu_->dex_file->GetMethodName(method_id);
  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  if (!encoded_mapping_table_.empty()) {
    MappingTable table(&encoded_mapping_table_[0]);
    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
                     table.PcToDexSize(), table.PcToDexBegin());
    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
                     table.DexToPcSize(), table.DexToPcBegin());
  }
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within specified delta (greater or equal to 0).
 */
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
  while (data_target) {
    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}
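// The unsigned cast above turns one comparison into a two-sided range check:
// (unsigned)(value - x) <= delta accepts x in [value - delta, value], which
// degenerates to an exact match when delta == 0. Illustrative: value = 0x100
// with delta = 4 matches pool entries 0xFC through 0x100, while 0x101 wraps
// around to a huge unsigned number and is rejected.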
/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact method match */
LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method) {
  while (data_target) {
    if (static_cast<uint32_t>(data_target->operands[0]) == method.dex_method_index &&
        UnwrapPointer(data_target->operands[1]) == method.dex_file) {
      return data_target;
    }
    data_target = data_target->next;
  }
  return nullptr;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    estimated_native_code_size_ += sizeof(value);
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}

static void Push32(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems.
static void PushPointer(std::vector<uint8_t>& buf, const void* pointer, bool target64) {
  uint64_t data = reinterpret_cast<uintptr_t>(pointer);
  if (target64) {
    Push32(buf, data & 0xFFFFFFFF);
    Push32(buf, (data >> 32) & 0xFFFFFFFF);
  } else {
    Push32(buf, static_cast<uint32_t>(data));
  }
}

static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}
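// Push32 emits bytes little-endian, so PushPointer keeps pool words in the
// byte order the generated code expects. Illustrative: on a 64-bit target,
// the pointer value 0x0000007fdeadbeef is emitted as the byte sequence
// ef be ad de 7f 00 00 00.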
/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    Push32(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target_method_idx,
                                       target_dex_file,
                                       static_cast<InvokeType>(data_lir->operands[2]),
                                       code_buffer_.size());
    const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    // Unique value based on target to ensure code deduplication works.
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target_method_idx,
                                         target_dex_file,
                                         static_cast<InvokeType>(data_lir->operands[2]),
                                         code_buffer_.size());
    const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    // Unique value based on target to ensure code deduplication works.
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push class literals.
  data_lir = class_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    cu_->compiler_driver->AddClassPatch(cu_->dex_file,
                                        cu_->class_def_idx,
                                        cu_->method_idx,
                                        target_method_idx,
                                        code_buffer_.size());
    const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx);
    // Unique value based on target to ensure code deduplication works.
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
}
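// Each Add*Patch call above records the literal's final position in the
// method (code_buffer_.size() at the time of the push) so the linker can
// rewrite the word later; the MethodId/TypeId address pushed in its place is
// only there to keep otherwise byte-identical methods with different targets
// from being deduplicated against each other.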
519 */ 520 int bx_offset = INVALID_OFFSET; 521 switch (cu_->instruction_set) { 522 case kThumb2: 523 DCHECK(tab_rec->anchor->flags.fixup != kFixupNone); 524 bx_offset = tab_rec->anchor->offset + 4; 525 break; 526 case kX86: 527 case kX86_64: 528 bx_offset = 0; 529 break; 530 case kArm64: 531 case kMips: 532 bx_offset = tab_rec->anchor->offset; 533 break; 534 default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set; 535 } 536 if (cu_->verbose) { 537 LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset; 538 } 539 if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 540 const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2])); 541 for (int elems = 0; elems < tab_rec->table[1]; elems++) { 542 int disp = tab_rec->targets[elems]->offset - bx_offset; 543 if (cu_->verbose) { 544 LOG(INFO) << " Case[" << elems << "] key: 0x" 545 << std::hex << keys[elems] << ", disp: 0x" 546 << std::hex << disp; 547 } 548 Push32(code_buffer_, keys[elems]); 549 Push32(code_buffer_, 550 tab_rec->targets[elems]->offset - bx_offset); 551 } 552 } else { 553 DCHECK_EQ(static_cast<int>(tab_rec->table[0]), 554 static_cast<int>(Instruction::kPackedSwitchSignature)); 555 for (int elems = 0; elems < tab_rec->table[1]; elems++) { 556 int disp = tab_rec->targets[elems]->offset - bx_offset; 557 if (cu_->verbose) { 558 LOG(INFO) << " Case[" << elems << "] disp: 0x" 559 << std::hex << disp; 560 } 561 Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset); 562 } 563 } 564 } 565} 566 567/* Write the fill array dta to the output stream */ 568void Mir2Lir::InstallFillArrayData() { 569 GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); 570 while (true) { 571 Mir2Lir::FillArrayData *tab_rec = iterator.Next(); 572 if (tab_rec == NULL) break; 573 AlignBuffer(code_buffer_, tab_rec->offset); 574 for (int i = 0; i < (tab_rec->size + 1) / 2; i++) { 575 code_buffer_.push_back(tab_rec->table[i] & 0xFF); 576 code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF); 577 } 578 } 579} 580 581static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) { 582 for (; lir != NULL; lir = lir->next) { 583 lir->offset = offset; 584 offset += 4; 585 } 586 return offset; 587} 588 589static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset, 590 unsigned int element_size) { 591 // Align to natural pointer size. 592 offset = RoundUp(offset, element_size); 593 for (; lir != NULL; lir = lir->next) { 594 lir->offset = offset; 595 offset += element_size; 596 } 597 return offset; 598} 599 600// Make sure we have a code address for every declared catch entry 601bool Mir2Lir::VerifyCatchEntries() { 602 MappingTable table(&encoded_mapping_table_[0]); 603 std::vector<uint32_t> dex_pcs; 604 dex_pcs.reserve(table.DexToPcSize()); 605 for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) { 606 dex_pcs.push_back(it.DexPc()); 607 } 608 // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_. 
// Make sure we have a code address for every declared catch entry.
bool Mir2Lir::VerifyCatchEntries() {
  MappingTable table(&encoded_mapping_table_[0]);
  std::vector<uint32_t> dex_pcs;
  dex_pcs.reserve(table.DexToPcSize());
  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
    dex_pcs.push_back(it.DexPc());
  }
  // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_.
  std::sort(dex_pcs.begin(), dex_pcs.end());

  bool success = true;
  auto it = dex_pcs.begin(), end = dex_pcs.end();
  for (uint32_t dex_pc : mir_graph_->catches_) {
    while (it != end && *it < dex_pc) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
      ++it;
      success = false;
    }
    if (it == end || *it > dex_pc) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    } else {
      ++it;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << table.DexToPcSize();
  }
  return success;
}

void Mir2Lir::CreateMappingTables() {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = 0u;
  uint32_t pc2dex_offset = 0u;
  uint32_t pc2dex_dalvik_offset = 0u;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  uint32_t dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_entries += 1;
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset);
      pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_entries += 1;
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  encoded_mapping_table_.resize(data_size);
  uint8_t* write_pos = &encoded_mapping_table_[0];
  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;
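  // Second pass: emit the entries. Both tables are delta-encoded: each native
  // offset is written as an unsigned LEB128 delta from the previous entry and
  // each dex pc as a signed LEB128 delta, keeping monotone tables compact.
  // Illustrative: native offsets {8, 12, 20} with dex pcs {0, 1, 3} encode as
  // the pairs (8, 0), (4, 1), (8, 2).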
  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                     static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                      static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
            hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);

  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());

    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
        ++it;
      }
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}
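// CreateNativeGcMap below re-keys the verifier's dex-pc-indexed reference
// bitmaps by native pc: every safepoint recorded in the pc2dex table must
// have a reference bitmap at its dex pc, which is what the CHECK on
// FindBitMap enforces.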
void Mir2Lir::CreateNativeGcMap() {
  DCHECK(!encoded_mapping_table_.empty());
  MappingTable mapping_table(&encoded_mapping_table_[0]);
  uint32_t max_native_offset = 0;
  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>& gc_map_raw =
      mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
  DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
  // Compute native offset to references size.
  GcMapBuilder native_gc_map_builder(&native_gc_map_,
                                     mapping_table.PcToDexSize(),
                                     max_native_offset, dex_gc_map.RegWidth());

  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    uint32_t dex_pc = it.DexPc();
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
        ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  unsigned int ptr_size = GetInstructionSetPointerSize(cu_->instruction_set);
  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
  return offset;
}

int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // Word align.
    offset = RoundUp(offset, 4);
  }
  return offset;
}
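// Illustrative sizing, assuming 4-byte ints: a sparse switch with
// table[1] == 3 reserves 3 * 8 = 24 bytes of key/target pairs, while a packed
// switch with 3 entries reserves only 3 * 4 = 12 bytes of targets.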
/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * offset vaddr if pretty-printing, otherwise use the standard block
 * label. The selected label will be used to fix up the case
 * branch table during the assembly phase. All resource flags
 * are set to prevent code motion. KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
  LIR* res = boundary_lir;
  if (cu_->verbose) {
    // Only pay the expense if we're pretty-printing.
    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
    new_label->dalvik_offset = vaddr;
    new_label->opcode = kPseudoCaseLabel;
    new_label->operands[0] = keyVal;
    new_label->flags.fixup = kFixupLabel;
    DCHECK(!new_label->flags.use_def_invalid);
    new_label->u.m.def_mask = &kEncodeAll;
    InsertLIRAfter(boundary_lir, new_label);
    res = new_label;
  }
  return res;
}

void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200  magic value
   *  ushort size            number of entries in the table; > 0
   *  int keys[size]         keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]      branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}
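// Illustrative sparse-switch payload, in 16-bit code units and assuming the
// usual little-endian dex layout, for the two cases {1 -> +0x10, 5 -> +0x20}:
//   0x0200 0x0002  0x0001 0x0000 0x0005 0x0000  0x0010 0x0000 0x0020 0x0000
// i.e. ident, size, the 32-bit keys sorted low-to-high, then the 32-bit
// targets relative to the switch opcode.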
889 */ 890 uint16_t ident = table[0]; 891 const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]); 892 int entries = table[1]; 893 int low_key = s4FromSwitchData(&table[2]); 894 LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident 895 << ", entries: " << std::dec << entries << ", low_key: " << low_key; 896 for (int i = 0; i < entries; i++) { 897 LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex 898 << targets[i]; 899 } 900} 901 902/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */ 903void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) { 904 // NOTE: only used for debug listings. 905 NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str))); 906} 907 908bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) { 909 bool is_taken; 910 switch (opcode) { 911 case Instruction::IF_EQ: is_taken = (src1 == src2); break; 912 case Instruction::IF_NE: is_taken = (src1 != src2); break; 913 case Instruction::IF_LT: is_taken = (src1 < src2); break; 914 case Instruction::IF_GE: is_taken = (src1 >= src2); break; 915 case Instruction::IF_GT: is_taken = (src1 > src2); break; 916 case Instruction::IF_LE: is_taken = (src1 <= src2); break; 917 case Instruction::IF_EQZ: is_taken = (src1 == 0); break; 918 case Instruction::IF_NEZ: is_taken = (src1 != 0); break; 919 case Instruction::IF_LTZ: is_taken = (src1 < 0); break; 920 case Instruction::IF_GEZ: is_taken = (src1 >= 0); break; 921 case Instruction::IF_GTZ: is_taken = (src1 > 0); break; 922 case Instruction::IF_LEZ: is_taken = (src1 <= 0); break; 923 default: 924 LOG(FATAL) << "Unexpected opcode " << opcode; 925 is_taken = false; 926 } 927 return is_taken; 928} 929 930// Convert relation of src1/src2 to src2/src1 931ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) { 932 ConditionCode res; 933 switch (before) { 934 case kCondEq: res = kCondEq; break; 935 case kCondNe: res = kCondNe; break; 936 case kCondLt: res = kCondGt; break; 937 case kCondGt: res = kCondLt; break; 938 case kCondLe: res = kCondGe; break; 939 case kCondGe: res = kCondLe; break; 940 default: 941 res = static_cast<ConditionCode>(0); 942 LOG(FATAL) << "Unexpected ccode " << before; 943 } 944 return res; 945} 946 947ConditionCode Mir2Lir::NegateComparison(ConditionCode before) { 948 ConditionCode res; 949 switch (before) { 950 case kCondEq: res = kCondNe; break; 951 case kCondNe: res = kCondEq; break; 952 case kCondLt: res = kCondGe; break; 953 case kCondGt: res = kCondLe; break; 954 case kCondLe: res = kCondGt; break; 955 case kCondGe: res = kCondLt; break; 956 default: 957 res = static_cast<ConditionCode>(0); 958 LOG(FATAL) << "Unexpected ccode " << before; 959 } 960 return res; 961} 962 963// TODO: move to mir_to_lir.cc 964Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 965 : Backend(arena), 966 literal_list_(NULL), 967 method_literal_list_(NULL), 968 class_literal_list_(NULL), 969 code_literal_list_(NULL), 970 first_fixup_(NULL), 971 cu_(cu), 972 mir_graph_(mir_graph), 973 switch_tables_(arena, 4, kGrowableArraySwitchTables), 974 fill_array_data_(arena, 4, kGrowableArrayFillArrayData), 975 tempreg_info_(arena, 20, kGrowableArrayMisc), 976 reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc), 977 pointer_storage_(arena, 128, kGrowableArrayMisc), 978 data_offset_(0), 979 total_size_(0), 980 block_label_list_(NULL), 981 promotion_map_(NULL), 982 current_dalvik_offset_(0), 983 
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      class_literal_list_(NULL),
      code_literal_list_(NULL),
      first_fixup_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      tempreg_info_(arena, 20, kGrowableArrayMisc),
      reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc),
      pointer_storage_(arena, 128, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      promotion_map_(NULL),
      current_dalvik_offset_(0),
      estimated_native_code_size_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL),
      slow_paths_(arena, 32, kGrowableArraySlowPaths),
      mem_ref_type_(ResourceMask::kHeapRef),
      mask_cache_(arena) {
  // Reserve pointer id 0 for NULL.
  size_t null_idx = WrapPointer(NULL);
  DCHECK_EQ(null_idx, 0U);
}

void Mir2Lir::Materialize() {
  cu_->NewTimingSplit("RegisterAllocation");
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming.

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  /* First try the custom light codegen for special cases. */
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
      ->GenSpecial(this, cu_->method_idx);

  /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. */
  if (special_worked == false) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // Mark the targets of switch statement case labels.
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if ((cu_->enable_debug & (1 << kDebugCodegenDump)) != 0) {
      CodegenDump();
    }
  }
}
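// The vmap table built in GetCompiledMethod below is a single ULEB128 stream:
// a size prefix, the sorted core-register entries, a kAdjustedFpMarker
// standing in for lr, then the fp entries, with every register entry biased
// by VmapTable::kEntryAdjustment.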
CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table.
  Leb128EncodingVector vmap_encoder;
  if (frame_size_ > 0) {
    // Prefix the encoded data with its size.
    size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size();
    vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
    vmap_encoder.PushBackUnsigned(size);
    // Core regs may have been inserted out of order - sort first.
    std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
    for (size_t i = 0; i < core_vmap_table_.size(); ++i) {
      // Copy, stripping out the phys register sort key.
      vmap_encoder.PushBackUnsigned(
          ~(-1 << VREG_NUM_WIDTH) & (core_vmap_table_[i] + VmapTable::kEntryAdjustment));
    }
    // Push a marker to take place of lr.
    vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
    if (cu_->instruction_set == kThumb2) {
      // fp regs already sorted.
      for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
        vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
      }
    } else {
      // For other platforms regs may have been inserted out of order - sort first.
      std::sort(fp_vmap_table_.begin(), fp_vmap_table_.end());
      for (size_t i = 0; i < fp_vmap_table_.size(); ++i) {
        // Copy, stripping out the phys register sort key.
        vmap_encoder.PushBackUnsigned(
            ~(-1 << VREG_NUM_WIDTH) & (fp_vmap_table_[i] + VmapTable::kEntryAdjustment));
      }
    }
  } else {
    DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
    DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0);
    DCHECK_EQ(core_vmap_table_.size(), 0u);
    DCHECK_EQ(fp_vmap_table_.size(), 0u);
    vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
  }

  std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnCallFrameInformation());
  CompiledMethod* result =
      new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
                         vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
  return result;
}

size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
  // Choose a reasonably small value in order to contain stack growth.
  // Backends that are smarter about spill region can return larger values.
  const size_t max_compiler_temps = 10;
  return max_compiler_temps;
}

size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
  // By default assume that the Mir2Lir will need one slot for each temporary.
  // If the backend can better determine temps that have non-overlapping ranges and
  // temps that do not need to be spilled, it can actually provide a smaller region.
  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
                  + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
                  + sizeof(uint32_t)  // Filler.
                  + (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t)
                  + GetNumBytesForCompilerTempSpillRegion();
  /* Align and set */
  return RoundUp(size, kStackAlignment);
}

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit.
 */
void Mir2Lir::AppendLIR(LIR* lir) {
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
  DCHECK(current_lir->prev != NULL);
  LIR* prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction.
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}

bool Mir2Lir::IsPowerOfTwo(uint64_t x) {
  return (x & (x - 1)) == 0;
}
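// Note that the x & (x - 1) test above also classifies 0 as a power of two,
// and LowestSetBit below never terminates for x == 0; both expect callers to
// handle zero beforehand. Illustrative: IsPowerOfTwo(8) is true (8 & 7 == 0),
// IsPowerOfTwo(12) is false (12 & 11 == 8).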
// Returns the index of the lowest set bit in 'x'.
int32_t Mir2Lir::LowestSetBit(uint64_t x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) -
              mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

LIR* Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                int offset, int check_value, LIR* target, LIR** compare) {
  // Handle this for architectures that can't compare to memory.
  LIR* inst = Load32Disp(base_reg, offset, temp_reg);
  if (compare != nullptr) {
    *compare = inst;
  }
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
  slow_paths_.Insert(slowpath);
}

void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                              SpecialTargetRegister symbolic_reg) {
  LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
  if (data_target == NULL) {
    data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
    // resolve these invokes to the same method, so we don't care which one we record here.
    data_target->operands[2] = type;
  }
  // Loads a code pointer. Code from oat file can be mapped anywhere.
  LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                SpecialTargetRegister symbolic_reg) {
  LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
  if (data_target == NULL) {
    data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
    // resolve these invokes to the same method, so we don't care which one we record here.
    data_target->operands[2] = type;
  }
  // Loads an ArtMethod pointer, which is a reference as it lives in the heap.
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}
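// LoadCodeAddress and LoadMethodAddress above differ only in what the literal
// resolves to at link time: a raw code pointer (hence the TargetPtrReg load)
// versus an ArtMethod*, which lives in the heap and is therefore loaded as a
// kRef that the GC can track.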
void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  // Use the literal pool and a PC-relative load from a data word.
  LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
  if (data_target == nullptr) {
    data_target = AddWordData(&class_literal_list_, type_idx);
  }
  // Loads a Class pointer, which is a reference as it lives in the heap.
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
  AppendLIR(load_pc_rel);
}

std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
  // Default case is to do nothing.
  return nullptr;
}

RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
  if (loc.location == kLocPhysReg) {
    DCHECK(!loc.reg.Is32Bit());
    if (loc.reg.IsPair()) {
      RegisterInfo* info_lo = GetRegInfo(loc.reg.GetLow());
      RegisterInfo* info_hi = GetRegInfo(loc.reg.GetHigh());
      info_lo->SetIsWide(false);
      info_hi->SetIsWide(false);
      loc.reg = info_lo->GetReg();
    } else {
      RegisterInfo* info = GetRegInfo(loc.reg);
      RegisterInfo* info_new = info->FindMatchingView(RegisterInfo::k32SoloStorageMask);
      DCHECK(info_new != nullptr);
      if (info->IsLive() && (info->SReg() == loc.s_reg_low)) {
        info->MarkDead();
        info_new->MarkLive(loc.s_reg_low);
      }
      loc.reg = info_new->GetReg();
    }
    DCHECK(loc.reg.Valid());
  }
  loc.wide = false;
  return loc;
}

void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
}

}  // namespace art