codegen_util.cc revision bd663de599b16229085759366c56e2ed5a1dc7ec
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "dex/compiler_internals.h" 18#include "dex_file-inl.h" 19#include "gc_map.h" 20#include "mapping_table.h" 21#include "mir_to_lir-inl.h" 22#include "verifier/dex_gc_map.h" 23#include "verifier/method_verifier.h" 24 25namespace art { 26 27bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) { 28 bool res = false; 29 if (rl_src.is_const) { 30 if (rl_src.wide) { 31 if (rl_src.fp) { 32 res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src)); 33 } else { 34 res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src)); 35 } 36 } else { 37 if (rl_src.fp) { 38 res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src)); 39 } else { 40 res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src)); 41 } 42 } 43 } 44 return res; 45} 46 47void Mir2Lir::MarkSafepointPC(LIR* inst) { 48 inst->def_mask = ENCODE_ALL; 49 LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC); 50 DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL); 51} 52 53bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) { 54 return cu_->compiler_driver->ComputeInstanceFieldInfo( 55 field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile); 56} 57 58/* Remove a LIR from the list. */ 59void Mir2Lir::UnlinkLIR(LIR* lir) { 60 if (UNLIKELY(lir == first_lir_insn_)) { 61 first_lir_insn_ = lir->next; 62 if (lir->next != NULL) { 63 lir->next->prev = NULL; 64 } else { 65 DCHECK(lir->next == NULL); 66 DCHECK(lir == last_lir_insn_); 67 last_lir_insn_ = NULL; 68 } 69 } else if (lir == last_lir_insn_) { 70 last_lir_insn_ = lir->prev; 71 lir->prev->next = NULL; 72 } else if ((lir->prev != NULL) && (lir->next != NULL)) { 73 lir->prev->next = lir->next; 74 lir->next->prev = lir->prev; 75 } 76} 77 78/* Convert an instruction to a NOP */ 79void Mir2Lir::NopLIR(LIR* lir) { 80 lir->flags.is_nop = true; 81 if (!cu_->verbose) { 82 UnlinkLIR(lir); 83 } 84} 85 86void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) { 87 uint64_t *mask_ptr; 88 uint64_t mask = ENCODE_MEM; 89 DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE)); 90 if (is_load) { 91 mask_ptr = &lir->use_mask; 92 } else { 93 mask_ptr = &lir->def_mask; 94 } 95 /* Clear out the memref flags */ 96 *mask_ptr &= ~mask; 97 /* ..and then add back the one we need */ 98 switch (mem_type) { 99 case kLiteral: 100 DCHECK(is_load); 101 *mask_ptr |= ENCODE_LITERAL; 102 break; 103 case kDalvikReg: 104 *mask_ptr |= ENCODE_DALVIK_REG; 105 break; 106 case kHeapRef: 107 *mask_ptr |= ENCODE_HEAP_REF; 108 break; 109 case kMustNotAlias: 110 /* Currently only loads can be marked as kMustNotAlias */ 111 DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE)); 112 *mask_ptr |= ENCODE_MUST_NOT_ALIAS; 113 break; 114 default: 115 LOG(FATAL) << "Oat: invalid memref kind - " << mem_type; 116 } 117} 118 119/* 120 * Mark load/store instructions that access Dalvik registers through the stack. 121 */ 122void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, 123 bool is64bit) { 124 SetMemRefType(lir, is_load, kDalvikReg); 125 126 /* 127 * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit 128 * access. 129 */ 130 lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit); 131} 132 133/* 134 * Debugging macros 135 */ 136#define DUMP_RESOURCE_MASK(X) 137 138/* Pretty-print a LIR instruction */ 139void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) { 140 int offset = lir->offset; 141 int dest = lir->operands[0]; 142 const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops)); 143 144 /* Handle pseudo-ops individually, and all regular insns as a group */ 145 switch (lir->opcode) { 146 case kPseudoMethodEntry: 147 LOG(INFO) << "-------- method entry " 148 << PrettyMethod(cu_->method_idx, *cu_->dex_file); 149 break; 150 case kPseudoMethodExit: 151 LOG(INFO) << "-------- Method_Exit"; 152 break; 153 case kPseudoBarrier: 154 LOG(INFO) << "-------- BARRIER"; 155 break; 156 case kPseudoEntryBlock: 157 LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest; 158 break; 159 case kPseudoDalvikByteCodeBoundary: 160 if (lir->operands[0] == 0) { 161 lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string"); 162 } 163 LOG(INFO) << "-------- dalvik offset: 0x" << std::hex 164 << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]); 165 break; 166 case kPseudoExitBlock: 167 LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest; 168 break; 169 case kPseudoPseudoAlign4: 170 LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex 171 << offset << "): .align4"; 172 break; 173 case kPseudoEHBlockLabel: 174 LOG(INFO) << "Exception_Handling:"; 175 break; 176 case kPseudoTargetLabel: 177 case kPseudoNormalBlockLabel: 178 LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":"; 179 break; 180 case kPseudoThrowTarget: 181 LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":"; 182 break; 183 case kPseudoIntrinsicRetry: 184 LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":"; 185 break; 186 case kPseudoSuspendTarget: 187 LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":"; 188 break; 189 case kPseudoSafepointPC: 190 LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":"; 191 break; 192 case kPseudoExportedPC: 193 LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":"; 194 break; 195 case kPseudoCaseLabel: 196 LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x" 197 << std::hex << lir->operands[0] << "|" << std::dec << 198 lir->operands[0]; 199 break; 200 default: 201 if (lir->flags.is_nop && !dump_nop) { 202 break; 203 } else { 204 std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode), 205 lir, base_addr)); 206 std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode), 207 lir, base_addr)); 208 LOG(INFO) << StringPrintf("%05x: %-9s%s%s", 209 reinterpret_cast<unsigned int>(base_addr + offset), 210 op_name.c_str(), op_operands.c_str(), 211 lir->flags.is_nop ? "(nop)" : ""); 212 } 213 break; 214 } 215 216 if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) { 217 DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use")); 218 } 219 if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) { 220 DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def")); 221 } 222} 223 224void Mir2Lir::DumpPromotionMap() { 225 int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1; 226 for (int i = 0; i < num_regs; i++) { 227 PromotionMap v_reg_map = promotion_map_[i]; 228 std::string buf; 229 if (v_reg_map.fp_location == kLocPhysReg) { 230 StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask()); 231 } 232 233 std::string buf3; 234 if (i < cu_->num_dalvik_registers) { 235 StringAppendF(&buf3, "%02d", i); 236 } else if (i == mir_graph_->GetMethodSReg()) { 237 buf3 = "Method*"; 238 } else { 239 StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers); 240 } 241 242 LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(), 243 v_reg_map.core_location == kLocPhysReg ? 244 "r" : "SP+", v_reg_map.core_location == kLocPhysReg ? 245 v_reg_map.core_reg : SRegOffset(i), 246 buf.c_str()); 247 } 248} 249 250/* Dump a mapping table */ 251void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor, 252 const std::string& name, const std::string& signature, 253 const std::vector<uint32_t>& v) { 254 if (v.size() > 0) { 255 std::string line(StringPrintf("\n %s %s%s_%s_table[%zu] = {", table_name, 256 descriptor.c_str(), name.c_str(), signature.c_str(), v.size())); 257 std::replace(line.begin(), line.end(), ';', '_'); 258 LOG(INFO) << line; 259 for (uint32_t i = 0; i < v.size(); i+=2) { 260 line = StringPrintf(" {0x%05x, 0x%04x},", v[i], v[i+1]); 261 LOG(INFO) << line; 262 } 263 LOG(INFO) <<" };\n\n"; 264 } 265} 266 267/* Dump instructions and constant pool contents */ 268void Mir2Lir::CodegenDump() { 269 LOG(INFO) << "Dumping LIR insns for " 270 << PrettyMethod(cu_->method_idx, *cu_->dex_file); 271 LIR* lir_insn; 272 int insns_size = cu_->code_item->insns_size_in_code_units_; 273 274 LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs; 275 LOG(INFO) << "Ins : " << cu_->num_ins; 276 LOG(INFO) << "Outs : " << cu_->num_outs; 277 LOG(INFO) << "CoreSpills : " << num_core_spills_; 278 LOG(INFO) << "FPSpills : " << num_fp_spills_; 279 LOG(INFO) << "CompilerTemps : " << cu_->num_compiler_temps; 280 LOG(INFO) << "Frame size : " << frame_size_; 281 LOG(INFO) << "code size is " << total_size_ << 282 " bytes, Dalvik size is " << insns_size * 2; 283 LOG(INFO) << "expansion factor: " 284 << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2); 285 DumpPromotionMap(); 286 for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) { 287 DumpLIRInsn(lir_insn, 0); 288 } 289 for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) { 290 LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset, 291 lir_insn->operands[0]); 292 } 293 294 const DexFile::MethodId& method_id = 295 cu_->dex_file->GetMethodId(cu_->method_idx); 296 std::string signature(cu_->dex_file->GetMethodSignature(method_id)); 297 std::string name(cu_->dex_file->GetMethodName(method_id)); 298 std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id)); 299 300 // Dump mapping tables 301 DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_); 302 DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_); 303} 304 305/* 306 * Search the existing constants in the literal pool for an exact or close match 307 * within specified delta (greater or equal to 0). 308 */ 309LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) { 310 while (data_target) { 311 if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta) 312 return data_target; 313 data_target = data_target->next; 314 } 315 return NULL; 316} 317 318/* Search the existing constants in the literal pool for an exact wide match */ 319LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) { 320 bool lo_match = false; 321 LIR* lo_target = NULL; 322 while (data_target) { 323 if (lo_match && (data_target->operands[0] == val_hi)) { 324 // Record high word in case we need to expand this later. 325 lo_target->operands[1] = val_hi; 326 return lo_target; 327 } 328 lo_match = false; 329 if (data_target->operands[0] == val_lo) { 330 lo_match = true; 331 lo_target = data_target; 332 } 333 data_target = data_target->next; 334 } 335 return NULL; 336} 337 338/* 339 * The following are building blocks to insert constants into the pool or 340 * instruction streams. 341 */ 342 343/* Add a 32-bit constant to the constant pool */ 344LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) { 345 /* Add the constant to the literal pool */ 346 if (constant_list_p) { 347 LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData)); 348 new_value->operands[0] = value; 349 new_value->next = *constant_list_p; 350 *constant_list_p = new_value; 351 return new_value; 352 } 353 return NULL; 354} 355 356/* Add a 64-bit constant to the constant pool or mixed with code */ 357LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) { 358 AddWordData(constant_list_p, val_hi); 359 return AddWordData(constant_list_p, val_lo); 360} 361 362static void PushWord(std::vector<uint8_t>&buf, int data) { 363 buf.push_back(data & 0xff); 364 buf.push_back((data >> 8) & 0xff); 365 buf.push_back((data >> 16) & 0xff); 366 buf.push_back((data >> 24) & 0xff); 367} 368 369static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) { 370 while (buf.size() < offset) { 371 buf.push_back(0); 372 } 373} 374 375/* Write the literal pool to the output stream */ 376void Mir2Lir::InstallLiteralPools() { 377 AlignBuffer(code_buffer_, data_offset_); 378 LIR* data_lir = literal_list_; 379 while (data_lir != NULL) { 380 PushWord(code_buffer_, data_lir->operands[0]); 381 data_lir = NEXT_LIR(data_lir); 382 } 383 // Push code and method literals, record offsets for the compiler to patch. 384 data_lir = code_literal_list_; 385 while (data_lir != NULL) { 386 uint32_t target = data_lir->operands[0]; 387 cu_->compiler_driver->AddCodePatch(cu_->dex_file, 388 cu_->method_idx, 389 cu_->invoke_type, 390 target, 391 static_cast<InvokeType>(data_lir->operands[1]), 392 code_buffer_.size()); 393 const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target); 394 // unique based on target to ensure code deduplication works 395 uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id); 396 PushWord(code_buffer_, unique_patch_value); 397 data_lir = NEXT_LIR(data_lir); 398 } 399 data_lir = method_literal_list_; 400 while (data_lir != NULL) { 401 uint32_t target = data_lir->operands[0]; 402 cu_->compiler_driver->AddMethodPatch(cu_->dex_file, 403 cu_->method_idx, 404 cu_->invoke_type, 405 target, 406 static_cast<InvokeType>(data_lir->operands[1]), 407 code_buffer_.size()); 408 const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target); 409 // unique based on target to ensure code deduplication works 410 uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id); 411 PushWord(code_buffer_, unique_patch_value); 412 data_lir = NEXT_LIR(data_lir); 413 } 414} 415 416/* Write the switch tables to the output stream */ 417void Mir2Lir::InstallSwitchTables() { 418 GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); 419 while (true) { 420 Mir2Lir::SwitchTable* tab_rec = iterator.Next(); 421 if (tab_rec == NULL) break; 422 AlignBuffer(code_buffer_, tab_rec->offset); 423 /* 424 * For Arm, our reference point is the address of the bx 425 * instruction that does the launch, so we have to subtract 426 * the auto pc-advance. For other targets the reference point 427 * is a label, so we can use the offset as-is. 428 */ 429 int bx_offset = INVALID_OFFSET; 430 switch (cu_->instruction_set) { 431 case kThumb2: 432 bx_offset = tab_rec->anchor->offset + 4; 433 break; 434 case kX86: 435 bx_offset = 0; 436 break; 437 case kMips: 438 bx_offset = tab_rec->anchor->offset; 439 break; 440 default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set; 441 } 442 if (cu_->verbose) { 443 LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset; 444 } 445 if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 446 const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2])); 447 for (int elems = 0; elems < tab_rec->table[1]; elems++) { 448 int disp = tab_rec->targets[elems]->offset - bx_offset; 449 if (cu_->verbose) { 450 LOG(INFO) << " Case[" << elems << "] key: 0x" 451 << std::hex << keys[elems] << ", disp: 0x" 452 << std::hex << disp; 453 } 454 PushWord(code_buffer_, keys[elems]); 455 PushWord(code_buffer_, 456 tab_rec->targets[elems]->offset - bx_offset); 457 } 458 } else { 459 DCHECK_EQ(static_cast<int>(tab_rec->table[0]), 460 static_cast<int>(Instruction::kPackedSwitchSignature)); 461 for (int elems = 0; elems < tab_rec->table[1]; elems++) { 462 int disp = tab_rec->targets[elems]->offset - bx_offset; 463 if (cu_->verbose) { 464 LOG(INFO) << " Case[" << elems << "] disp: 0x" 465 << std::hex << disp; 466 } 467 PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset); 468 } 469 } 470 } 471} 472 473/* Write the fill array dta to the output stream */ 474void Mir2Lir::InstallFillArrayData() { 475 GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); 476 while (true) { 477 Mir2Lir::FillArrayData *tab_rec = iterator.Next(); 478 if (tab_rec == NULL) break; 479 AlignBuffer(code_buffer_, tab_rec->offset); 480 for (int i = 0; i < (tab_rec->size + 1) / 2; i++) { 481 code_buffer_.push_back(tab_rec->table[i] & 0xFF); 482 code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF); 483 } 484 } 485} 486 487static int AssignLiteralOffsetCommon(LIR* lir, int offset) { 488 for (; lir != NULL; lir = lir->next) { 489 lir->offset = offset; 490 offset += 4; 491 } 492 return offset; 493} 494 495// Make sure we have a code address for every declared catch entry 496bool Mir2Lir::VerifyCatchEntries() { 497 bool success = true; 498 for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin(); 499 it != mir_graph_->catches_.end(); ++it) { 500 uint32_t dex_pc = *it; 501 bool found = false; 502 for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) { 503 if (dex_pc == dex2pc_mapping_table_[i+1]) { 504 found = true; 505 break; 506 } 507 } 508 if (!found) { 509 LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc; 510 success = false; 511 } 512 } 513 // Now, try in the other direction 514 for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) { 515 uint32_t dex_pc = dex2pc_mapping_table_[i+1]; 516 if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) { 517 LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc; 518 success = false; 519 } 520 } 521 if (!success) { 522 LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 523 LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: " 524 << dex2pc_mapping_table_.size()/2; 525 } 526 return success; 527} 528 529 530void Mir2Lir::CreateMappingTables() { 531 for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) { 532 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) { 533 pc2dex_mapping_table_.push_back(tgt_lir->offset); 534 pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset); 535 } 536 if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) { 537 dex2pc_mapping_table_.push_back(tgt_lir->offset); 538 dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset); 539 } 540 } 541 if (kIsDebugBuild) { 542 CHECK(VerifyCatchEntries()); 543 } 544 CHECK_EQ(pc2dex_mapping_table_.size() & 1, 0U); 545 CHECK_EQ(dex2pc_mapping_table_.size() & 1, 0U); 546 uint32_t total_entries = (pc2dex_mapping_table_.size() + dex2pc_mapping_table_.size()) / 2; 547 uint32_t pc2dex_entries = pc2dex_mapping_table_.size() / 2; 548 encoded_mapping_table_.PushBack(total_entries); 549 encoded_mapping_table_.PushBack(pc2dex_entries); 550 encoded_mapping_table_.InsertBack(pc2dex_mapping_table_.begin(), pc2dex_mapping_table_.end()); 551 encoded_mapping_table_.InsertBack(dex2pc_mapping_table_.begin(), dex2pc_mapping_table_.end()); 552 if (kIsDebugBuild) { 553 // Verify the encoded table holds the expected data. 554 MappingTable table(&encoded_mapping_table_.GetData()[0]); 555 CHECK_EQ(table.TotalSize(), total_entries); 556 CHECK_EQ(table.PcToDexSize(), pc2dex_entries); 557 CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2); 558 MappingTable::PcToDexIterator it = table.PcToDexBegin(); 559 for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) { 560 CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset()); 561 ++i; 562 CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc()); 563 } 564 MappingTable::DexToPcIterator it2 = table.DexToPcBegin(); 565 for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) { 566 CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset()); 567 ++i; 568 CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc()); 569 } 570 } 571} 572 573class NativePcToReferenceMapBuilder { 574 public: 575 NativePcToReferenceMapBuilder(std::vector<uint8_t>* table, 576 size_t entries, uint32_t max_native_offset, 577 size_t references_width) : entries_(entries), 578 references_width_(references_width), in_use_(entries), 579 table_(table) { 580 // Compute width in bytes needed to hold max_native_offset. 581 native_offset_width_ = 0; 582 while (max_native_offset != 0) { 583 native_offset_width_++; 584 max_native_offset >>= 8; 585 } 586 // Resize table and set up header. 587 table->resize((EntryWidth() * entries) + sizeof(uint32_t)); 588 CHECK_LT(native_offset_width_, 1U << 3); 589 (*table)[0] = native_offset_width_ & 7; 590 CHECK_LT(references_width_, 1U << 13); 591 (*table)[0] |= (references_width_ << 3) & 0xFF; 592 (*table)[1] = (references_width_ >> 5) & 0xFF; 593 CHECK_LT(entries, 1U << 16); 594 (*table)[2] = entries & 0xFF; 595 (*table)[3] = (entries >> 8) & 0xFF; 596 } 597 598 void AddEntry(uint32_t native_offset, const uint8_t* references) { 599 size_t table_index = TableIndex(native_offset); 600 while (in_use_[table_index]) { 601 table_index = (table_index + 1) % entries_; 602 } 603 in_use_[table_index] = true; 604 SetNativeOffset(table_index, native_offset); 605 DCHECK_EQ(native_offset, GetNativeOffset(table_index)); 606 SetReferences(table_index, references); 607 } 608 609 private: 610 size_t TableIndex(uint32_t native_offset) { 611 return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_; 612 } 613 614 uint32_t GetNativeOffset(size_t table_index) { 615 uint32_t native_offset = 0; 616 size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t); 617 for (size_t i = 0; i < native_offset_width_; i++) { 618 native_offset |= (*table_)[table_offset + i] << (i * 8); 619 } 620 return native_offset; 621 } 622 623 void SetNativeOffset(size_t table_index, uint32_t native_offset) { 624 size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t); 625 for (size_t i = 0; i < native_offset_width_; i++) { 626 (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF; 627 } 628 } 629 630 void SetReferences(size_t table_index, const uint8_t* references) { 631 size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t); 632 memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_); 633 } 634 635 size_t EntryWidth() const { 636 return native_offset_width_ + references_width_; 637 } 638 639 // Number of entries in the table. 640 const size_t entries_; 641 // Number of bytes used to encode the reference bitmap. 642 const size_t references_width_; 643 // Number of bytes used to encode a native offset. 644 size_t native_offset_width_; 645 // Entries that are in use. 646 std::vector<bool> in_use_; 647 // The table we're building. 648 std::vector<uint8_t>* const table_; 649}; 650 651void Mir2Lir::CreateNativeGcMap() { 652 const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_; 653 uint32_t max_native_offset = 0; 654 for (size_t i = 0; i < mapping_table.size(); i += 2) { 655 uint32_t native_offset = mapping_table[i + 0]; 656 if (native_offset > max_native_offset) { 657 max_native_offset = native_offset; 658 } 659 } 660 MethodReference method_ref(cu_->dex_file, cu_->method_idx); 661 const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref); 662 verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4); 663 // Compute native offset to references size. 664 NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_, 665 mapping_table.size() / 2, max_native_offset, 666 dex_gc_map.RegWidth()); 667 668 for (size_t i = 0; i < mapping_table.size(); i += 2) { 669 uint32_t native_offset = mapping_table[i + 0]; 670 uint32_t dex_pc = mapping_table[i + 1]; 671 const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false); 672 CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc; 673 native_gc_map_builder.AddEntry(native_offset, references); 674 } 675} 676 677/* Determine the offset of each literal field */ 678int Mir2Lir::AssignLiteralOffset(int offset) { 679 offset = AssignLiteralOffsetCommon(literal_list_, offset); 680 offset = AssignLiteralOffsetCommon(code_literal_list_, offset); 681 offset = AssignLiteralOffsetCommon(method_literal_list_, offset); 682 return offset; 683} 684 685int Mir2Lir::AssignSwitchTablesOffset(int offset) { 686 GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); 687 while (true) { 688 Mir2Lir::SwitchTable *tab_rec = iterator.Next(); 689 if (tab_rec == NULL) break; 690 tab_rec->offset = offset; 691 if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 692 offset += tab_rec->table[1] * (sizeof(int) * 2); 693 } else { 694 DCHECK_EQ(static_cast<int>(tab_rec->table[0]), 695 static_cast<int>(Instruction::kPackedSwitchSignature)); 696 offset += tab_rec->table[1] * sizeof(int); 697 } 698 } 699 return offset; 700} 701 702int Mir2Lir::AssignFillArrayDataOffset(int offset) { 703 GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_); 704 while (true) { 705 Mir2Lir::FillArrayData *tab_rec = iterator.Next(); 706 if (tab_rec == NULL) break; 707 tab_rec->offset = offset; 708 offset += tab_rec->size; 709 // word align 710 offset = (offset + 3) & ~3; 711 } 712 return offset; 713} 714 715// LIR offset assignment. 716int Mir2Lir::AssignInsnOffsets() { 717 LIR* lir; 718 int offset = 0; 719 720 for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) { 721 lir->offset = offset; 722 if (LIKELY(lir->opcode >= 0)) { 723 if (!lir->flags.is_nop) { 724 offset += lir->flags.size; 725 } 726 } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) { 727 if (offset & 0x2) { 728 offset += 2; 729 lir->operands[0] = 1; 730 } else { 731 lir->operands[0] = 0; 732 } 733 } 734 /* Pseudo opcodes don't consume space */ 735 } 736 return offset; 737} 738 739/* 740 * Walk the compilation unit and assign offsets to instructions 741 * and literals and compute the total size of the compiled unit. 742 */ 743void Mir2Lir::AssignOffsets() { 744 int offset = AssignInsnOffsets(); 745 746 /* Const values have to be word aligned */ 747 offset = (offset + 3) & ~3; 748 749 /* Set up offsets for literals */ 750 data_offset_ = offset; 751 752 offset = AssignLiteralOffset(offset); 753 754 offset = AssignSwitchTablesOffset(offset); 755 756 offset = AssignFillArrayDataOffset(offset); 757 758 total_size_ = offset; 759} 760 761/* 762 * Go over each instruction in the list and calculate the offset from the top 763 * before sending them off to the assembler. If out-of-range branch distance is 764 * seen rearrange the instructions a bit to correct it. 765 */ 766void Mir2Lir::AssembleLIR() { 767 AssignOffsets(); 768 int assembler_retries = 0; 769 /* 770 * Assemble here. Note that we generate code with optimistic assumptions 771 * and if found now to work, we'll have to redo the sequence and retry. 772 */ 773 774 while (true) { 775 AssemblerStatus res = AssembleInstructions(0); 776 if (res == kSuccess) { 777 break; 778 } else { 779 assembler_retries++; 780 if (assembler_retries > MAX_ASSEMBLER_RETRIES) { 781 CodegenDump(); 782 LOG(FATAL) << "Assembler error - too many retries"; 783 } 784 // Redo offsets and try again 785 AssignOffsets(); 786 code_buffer_.clear(); 787 } 788 } 789 790 // Install literals 791 InstallLiteralPools(); 792 793 // Install switch tables 794 InstallSwitchTables(); 795 796 // Install fill array data 797 InstallFillArrayData(); 798 799 // Create the mapping table and native offset to reference map. 800 CreateMappingTables(); 801 802 CreateNativeGcMap(); 803} 804 805/* 806 * Insert a kPseudoCaseLabel at the beginning of the Dalvik 807 * offset vaddr. This label will be used to fix up the case 808 * branch table during the assembly phase. All resource flags 809 * are set to prevent code motion. KeyVal is just there for debugging. 810 */ 811LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) { 812 LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id]; 813 LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR)); 814 new_label->dalvik_offset = vaddr; 815 new_label->opcode = kPseudoCaseLabel; 816 new_label->operands[0] = keyVal; 817 new_label->def_mask = ENCODE_ALL; 818 InsertLIRAfter(boundary_lir, new_label); 819 return new_label; 820} 821 822void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) { 823 const uint16_t* table = tab_rec->table; 824 int base_vaddr = tab_rec->vaddr; 825 const int *targets = reinterpret_cast<const int*>(&table[4]); 826 int entries = table[1]; 827 int low_key = s4FromSwitchData(&table[2]); 828 for (int i = 0; i < entries; i++) { 829 tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key); 830 } 831} 832 833void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) { 834 const uint16_t* table = tab_rec->table; 835 int base_vaddr = tab_rec->vaddr; 836 int entries = table[1]; 837 const int* keys = reinterpret_cast<const int*>(&table[2]); 838 const int* targets = &keys[entries]; 839 for (int i = 0; i < entries; i++) { 840 tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]); 841 } 842} 843 844void Mir2Lir::ProcessSwitchTables() { 845 GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_); 846 while (true) { 847 Mir2Lir::SwitchTable *tab_rec = iterator.Next(); 848 if (tab_rec == NULL) break; 849 if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) { 850 MarkPackedCaseLabels(tab_rec); 851 } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) { 852 MarkSparseCaseLabels(tab_rec); 853 } else { 854 LOG(FATAL) << "Invalid switch table"; 855 } 856 } 857} 858 859void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) { 860 /* 861 * Sparse switch data format: 862 * ushort ident = 0x0200 magic value 863 * ushort size number of entries in the table; > 0 864 * int keys[size] keys, sorted low-to-high; 32-bit aligned 865 * int targets[size] branch targets, relative to switch opcode 866 * 867 * Total size is (2+size*4) 16-bit code units. 868 */ 869 uint16_t ident = table[0]; 870 int entries = table[1]; 871 const int* keys = reinterpret_cast<const int*>(&table[2]); 872 const int* targets = &keys[entries]; 873 LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident 874 << ", entries: " << std::dec << entries; 875 for (int i = 0; i < entries; i++) { 876 LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i]; 877 } 878} 879 880void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) { 881 /* 882 * Packed switch data format: 883 * ushort ident = 0x0100 magic value 884 * ushort size number of entries in the table 885 * int first_key first (and lowest) switch case value 886 * int targets[size] branch targets, relative to switch opcode 887 * 888 * Total size is (4+size*2) 16-bit code units. 889 */ 890 uint16_t ident = table[0]; 891 const int* targets = reinterpret_cast<const int*>(&table[4]); 892 int entries = table[1]; 893 int low_key = s4FromSwitchData(&table[2]); 894 LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident 895 << ", entries: " << std::dec << entries << ", low_key: " << low_key; 896 for (int i = 0; i < entries; i++) { 897 LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex 898 << targets[i]; 899 } 900} 901 902/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */ 903void Mir2Lir::MarkBoundary(int offset, const char* inst_str) { 904 NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str)); 905} 906 907bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) { 908 bool is_taken; 909 switch (opcode) { 910 case Instruction::IF_EQ: is_taken = (src1 == src2); break; 911 case Instruction::IF_NE: is_taken = (src1 != src2); break; 912 case Instruction::IF_LT: is_taken = (src1 < src2); break; 913 case Instruction::IF_GE: is_taken = (src1 >= src2); break; 914 case Instruction::IF_GT: is_taken = (src1 > src2); break; 915 case Instruction::IF_LE: is_taken = (src1 <= src2); break; 916 case Instruction::IF_EQZ: is_taken = (src1 == 0); break; 917 case Instruction::IF_NEZ: is_taken = (src1 != 0); break; 918 case Instruction::IF_LTZ: is_taken = (src1 < 0); break; 919 case Instruction::IF_GEZ: is_taken = (src1 >= 0); break; 920 case Instruction::IF_GTZ: is_taken = (src1 > 0); break; 921 case Instruction::IF_LEZ: is_taken = (src1 <= 0); break; 922 default: 923 LOG(FATAL) << "Unexpected opcode " << opcode; 924 is_taken = false; 925 } 926 return is_taken; 927} 928 929// Convert relation of src1/src2 to src2/src1 930ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) { 931 ConditionCode res; 932 switch (before) { 933 case kCondEq: res = kCondEq; break; 934 case kCondNe: res = kCondNe; break; 935 case kCondLt: res = kCondGt; break; 936 case kCondGt: res = kCondLt; break; 937 case kCondLe: res = kCondGe; break; 938 case kCondGe: res = kCondLe; break; 939 default: 940 res = static_cast<ConditionCode>(0); 941 LOG(FATAL) << "Unexpected ccode " << before; 942 } 943 return res; 944} 945 946// TODO: move to mir_to_lir.cc 947Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena) 948 : Backend(arena), 949 literal_list_(NULL), 950 method_literal_list_(NULL), 951 code_literal_list_(NULL), 952 cu_(cu), 953 mir_graph_(mir_graph), 954 switch_tables_(arena, 4, kGrowableArraySwitchTables), 955 fill_array_data_(arena, 4, kGrowableArrayFillArrayData), 956 throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads), 957 suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads), 958 intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc), 959 tempreg_info_(arena, 20, kGrowableArrayMisc), 960 reginfo_map_(arena, 64, kGrowableArrayMisc), 961 data_offset_(0), 962 total_size_(0), 963 block_label_list_(NULL), 964 current_dalvik_offset_(0), 965 reg_pool_(NULL), 966 live_sreg_(0), 967 num_core_spills_(0), 968 num_fp_spills_(0), 969 frame_size_(0), 970 core_spill_mask_(0), 971 fp_spill_mask_(0), 972 first_lir_insn_(NULL), 973 last_lir_insn_(NULL) { 974 promotion_map_ = static_cast<PromotionMap*> 975 (arena_->Alloc((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) * 976 sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc)); 977} 978 979void Mir2Lir::Materialize() { 980 CompilerInitializeRegAlloc(); // Needs to happen after SSA naming 981 982 /* Allocate Registers using simple local allocation scheme */ 983 SimpleRegAlloc(); 984 985 if (mir_graph_->IsSpecialCase()) { 986 /* 987 * Custom codegen for special cases. If for any reason the 988 * special codegen doesn't succeed, first_lir_insn_ will 989 * set to NULL; 990 */ 991 SpecialMIR2LIR(mir_graph_->GetSpecialCase()); 992 } 993 994 /* Convert MIR to LIR, etc. */ 995 if (first_lir_insn_ == NULL) { 996 MethodMIR2LIR(); 997 } 998 999 /* Method is not empty */ 1000 if (first_lir_insn_) { 1001 // mark the targets of switch statement case labels 1002 ProcessSwitchTables(); 1003 1004 /* Convert LIR into machine code. */ 1005 AssembleLIR(); 1006 1007 if (cu_->verbose) { 1008 CodegenDump(); 1009 } 1010 } 1011} 1012 1013CompiledMethod* Mir2Lir::GetCompiledMethod() { 1014 // Combine vmap tables - core regs, then fp regs - into vmap_table 1015 std::vector<uint16_t> raw_vmap_table; 1016 // Core regs may have been inserted out of order - sort first 1017 std::sort(core_vmap_table_.begin(), core_vmap_table_.end()); 1018 for (size_t i = 0 ; i < core_vmap_table_.size(); ++i) { 1019 // Copy, stripping out the phys register sort key 1020 raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]); 1021 } 1022 // If we have a frame, push a marker to take place of lr 1023 if (frame_size_ > 0) { 1024 raw_vmap_table.push_back(INVALID_VREG); 1025 } else { 1026 DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0); 1027 DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0); 1028 } 1029 // Combine vmap tables - core regs, then fp regs. fp regs already sorted 1030 for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) { 1031 raw_vmap_table.push_back(fp_vmap_table_[i]); 1032 } 1033 UnsignedLeb128EncodingVector vmap_encoder; 1034 // Prefix the encoded data with its size. 1035 vmap_encoder.PushBack(raw_vmap_table.size()); 1036 for (uint16_t cur : raw_vmap_table) { 1037 vmap_encoder.PushBack(cur); 1038 } 1039 CompiledMethod* result = 1040 new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_, 1041 core_spill_mask_, fp_spill_mask_, encoded_mapping_table_.GetData(), 1042 vmap_encoder.GetData(), native_gc_map_); 1043 return result; 1044} 1045 1046int Mir2Lir::ComputeFrameSize() { 1047 /* Figure out the frame size */ 1048 static const uint32_t kAlignMask = kStackAlignment - 1; 1049 uint32_t size = (num_core_spills_ + num_fp_spills_ + 1050 1 /* filler word */ + cu_->num_regs + cu_->num_outs + 1051 cu_->num_compiler_temps + 1 /* cur_method* */) 1052 * sizeof(uint32_t); 1053 /* Align and set */ 1054 return (size + kAlignMask) & ~(kAlignMask); 1055} 1056 1057/* 1058 * Append an LIR instruction to the LIR list maintained by a compilation 1059 * unit 1060 */ 1061void Mir2Lir::AppendLIR(LIR* lir) { 1062 if (first_lir_insn_ == NULL) { 1063 DCHECK(last_lir_insn_ == NULL); 1064 last_lir_insn_ = first_lir_insn_ = lir; 1065 lir->prev = lir->next = NULL; 1066 } else { 1067 last_lir_insn_->next = lir; 1068 lir->prev = last_lir_insn_; 1069 lir->next = NULL; 1070 last_lir_insn_ = lir; 1071 } 1072} 1073 1074/* 1075 * Insert an LIR instruction before the current instruction, which cannot be the 1076 * first instruction. 1077 * 1078 * prev_lir <-> new_lir <-> current_lir 1079 */ 1080void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) { 1081 DCHECK(current_lir->prev != NULL); 1082 LIR *prev_lir = current_lir->prev; 1083 1084 prev_lir->next = new_lir; 1085 new_lir->prev = prev_lir; 1086 new_lir->next = current_lir; 1087 current_lir->prev = new_lir; 1088} 1089 1090/* 1091 * Insert an LIR instruction after the current instruction, which cannot be the 1092 * first instruction. 1093 * 1094 * current_lir -> new_lir -> old_next 1095 */ 1096void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) { 1097 new_lir->prev = current_lir; 1098 new_lir->next = current_lir->next; 1099 current_lir->next = new_lir; 1100 new_lir->next->prev = new_lir; 1101} 1102 1103} // namespace art 1104