mir_graph.cc revision 41b175aba41c9365a1c53b8a1afbd17129c87c14
1/* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mir_graph.h" 18 19#include <inttypes.h> 20#include <queue> 21#include <unistd.h> 22 23#include "base/bit_vector-inl.h" 24#include "base/logging.h" 25#include "base/stl_util.h" 26#include "base/stringprintf.h" 27#include "base/scoped_arena_containers.h" 28#include "compiler_ir.h" 29#include "dex_file-inl.h" 30#include "dex_flags.h" 31#include "dex_instruction-inl.h" 32#include "driver/compiler_driver.h" 33#include "driver/dex_compilation_unit.h" 34#include "dex/quick/quick_compiler.h" 35#include "leb128.h" 36#include "pass_driver_me_post_opt.h" 37#include "stack.h" 38#include "utils.h" 39 40namespace art { 41 42#define MAX_PATTERN_LEN 5 43 44const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = { 45 "Phi", 46 "Copy", 47 "FusedCmplFloat", 48 "FusedCmpgFloat", 49 "FusedCmplDouble", 50 "FusedCmpgDouble", 51 "FusedCmpLong", 52 "Nop", 53 "OpNullCheck", 54 "OpRangeCheck", 55 "OpDivZeroCheck", 56 "Check", 57 "Select", 58 "ConstVector", 59 "MoveVector", 60 "PackedMultiply", 61 "PackedAddition", 62 "PackedSubtract", 63 "PackedShiftLeft", 64 "PackedSignedShiftRight", 65 "PackedUnsignedShiftRight", 66 "PackedAnd", 67 "PackedOr", 68 "PackedXor", 69 "PackedAddReduce", 70 "PackedReduce", 71 "PackedSet", 72 "ReserveVectorRegisters", 73 "ReturnVectorRegisters", 74 "MemBarrier", 75 "PackedArrayGet", 76 "PackedArrayPut", 77 "MaddInt", 78 "MsubInt", 79 "MaddLong", 80 "MsubLong", 81}; 82 83MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena) 84 : reg_location_(nullptr), 85 block_id_map_(std::less<unsigned int>(), arena->Adapter()), 86 cu_(cu), 87 ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)), 88 ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)), 89 vreg_to_ssa_map_(nullptr), 90 ssa_last_defs_(nullptr), 91 is_constant_v_(nullptr), 92 constant_values_(nullptr), 93 use_counts_(arena->Adapter()), 94 raw_use_counts_(arena->Adapter()), 95 num_reachable_blocks_(0), 96 max_num_reachable_blocks_(0), 97 dfs_orders_up_to_date_(false), 98 domination_up_to_date_(false), 99 mir_ssa_rep_up_to_date_(false), 100 topological_order_up_to_date_(false), 101 dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)), 102 dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)), 103 dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)), 104 topological_order_(arena->Adapter(kArenaAllocTopologicalSortOrder)), 105 topological_order_loop_ends_(arena->Adapter(kArenaAllocTopologicalSortOrder)), 106 topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)), 107 topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)), 108 max_nested_loops_(0u), 109 i_dom_list_(nullptr), 110 temp_scoped_alloc_(), 111 block_list_(arena->Adapter(kArenaAllocBBList)), 112 try_block_addr_(nullptr), 113 entry_block_(nullptr), 114 exit_block_(nullptr), 115 current_code_item_(nullptr), 116 m_units_(arena->Adapter()), 117 method_stack_(arena->Adapter()), 118 current_method_(kInvalidEntry), 119 current_offset_(kInvalidEntry), 120 def_count_(0), 121 opcode_count_(nullptr), 122 num_ssa_regs_(0), 123 extended_basic_blocks_(arena->Adapter()), 124 method_sreg_(0), 125 attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke. 126 checkstats_(nullptr), 127 arena_(arena), 128 backward_branches_(0), 129 forward_branches_(0), 130 num_non_special_compiler_temps_(0), 131 max_available_special_compiler_temps_(1), // We only need the method ptr as a special temp for now. 132 requested_backend_temp_(false), 133 compiler_temps_committed_(false), 134 punt_to_interpreter_(false), 135 merged_df_flags_(0u), 136 ifield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)), 137 sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)), 138 method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)), 139 suspend_checks_in_loops_(nullptr) { 140 memset(&temp_, 0, sizeof(temp_)); 141 use_counts_.reserve(256); 142 raw_use_counts_.reserve(256); 143 block_list_.reserve(100); 144 try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */); 145 146 147 if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { 148 // X86 requires a temp to keep track of the method address. 149 // TODO For x86_64, addressing can be done with RIP. When that is implemented, 150 // this needs to be updated to reserve 0 temps for BE. 151 max_available_non_special_compiler_temps_ = cu_->target64 ? 2 : 1; 152 reserved_temps_for_backend_ = max_available_non_special_compiler_temps_; 153 } else { 154 // Other architectures do not have a known lower bound for non-special temps. 155 // We allow the update of the max to happen at BE initialization stage and simply set 0 for now. 156 max_available_non_special_compiler_temps_ = 0; 157 reserved_temps_for_backend_ = 0; 158 } 159} 160 161MIRGraph::~MIRGraph() { 162 STLDeleteElements(&block_list_); 163 STLDeleteElements(&m_units_); 164} 165 166/* 167 * Parse an instruction, return the length of the instruction 168 */ 169int MIRGraph::ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction) { 170 const Instruction* inst = Instruction::At(code_ptr); 171 decoded_instruction->opcode = inst->Opcode(); 172 decoded_instruction->vA = inst->HasVRegA() ? inst->VRegA() : 0; 173 decoded_instruction->vB = inst->HasVRegB() ? inst->VRegB() : 0; 174 decoded_instruction->vB_wide = inst->HasWideVRegB() ? inst->WideVRegB() : 0; 175 decoded_instruction->vC = inst->HasVRegC() ? inst->VRegC() : 0; 176 if (inst->HasVarArgs()) { 177 inst->GetVarArgs(decoded_instruction->arg); 178 } 179 return inst->SizeInCodeUnits(); 180} 181 182 183/* Split an existing block from the specified code offset into two */ 184BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset, 185 BasicBlock* orig_block, BasicBlock** immed_pred_block_p) { 186 DCHECK_GT(code_offset, orig_block->start_offset); 187 MIR* insn = orig_block->first_mir_insn; 188 MIR* prev = nullptr; // Will be set to instruction before split. 189 while (insn) { 190 if (insn->offset == code_offset) break; 191 prev = insn; 192 insn = insn->next; 193 } 194 if (insn == nullptr) { 195 LOG(FATAL) << "Break split failed"; 196 } 197 // Now insn is at the instruction where we want to split, namely 198 // insn will be the first instruction of the "bottom" block. 199 // Similarly, prev will be the last instruction of the "top" block 200 201 BasicBlock* bottom_block = CreateNewBB(kDalvikByteCode); 202 203 bottom_block->start_offset = code_offset; 204 bottom_block->first_mir_insn = insn; 205 bottom_block->last_mir_insn = orig_block->last_mir_insn; 206 207 /* If this block was terminated by a return, conditional branch or throw, 208 * the flag needs to go with the bottom block 209 */ 210 bottom_block->terminated_by_return = orig_block->terminated_by_return; 211 orig_block->terminated_by_return = false; 212 213 bottom_block->conditional_branch = orig_block->conditional_branch; 214 orig_block->conditional_branch = false; 215 216 bottom_block->explicit_throw = orig_block->explicit_throw; 217 orig_block->explicit_throw = false; 218 219 /* Handle the taken path */ 220 bottom_block->taken = orig_block->taken; 221 if (bottom_block->taken != NullBasicBlockId) { 222 orig_block->taken = NullBasicBlockId; 223 BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken); 224 bb_taken->ErasePredecessor(orig_block->id); 225 bb_taken->predecessors.push_back(bottom_block->id); 226 } 227 228 /* Handle the fallthrough path */ 229 bottom_block->fall_through = orig_block->fall_through; 230 orig_block->fall_through = bottom_block->id; 231 bottom_block->predecessors.push_back(orig_block->id); 232 if (bottom_block->fall_through != NullBasicBlockId) { 233 BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through); 234 bb_fall_through->ErasePredecessor(orig_block->id); 235 bb_fall_through->predecessors.push_back(bottom_block->id); 236 } 237 238 /* Handle the successor list */ 239 if (orig_block->successor_block_list_type != kNotUsed) { 240 bottom_block->successor_block_list_type = orig_block->successor_block_list_type; 241 bottom_block->successor_blocks.swap(orig_block->successor_blocks); 242 orig_block->successor_block_list_type = kNotUsed; 243 DCHECK(orig_block->successor_blocks.empty()); // Empty after the swap() above. 244 for (SuccessorBlockInfo* successor_block_info : bottom_block->successor_blocks) { 245 BasicBlock* bb = GetBasicBlock(successor_block_info->block); 246 if (bb != nullptr) { 247 bb->ErasePredecessor(orig_block->id); 248 bb->predecessors.push_back(bottom_block->id); 249 } 250 } 251 } 252 253 orig_block->last_mir_insn = prev; 254 prev->next = nullptr; 255 256 /* 257 * Update the immediate predecessor block pointer so that outgoing edges 258 * can be applied to the proper block. 259 */ 260 if (immed_pred_block_p) { 261 DCHECK_EQ(*immed_pred_block_p, orig_block); 262 *immed_pred_block_p = bottom_block; 263 } 264 265 // Associate dex instructions in the bottom block with the new container. 266 DCHECK(insn != nullptr); 267 DCHECK(insn != orig_block->first_mir_insn); 268 DCHECK(insn == bottom_block->first_mir_insn); 269 DCHECK_EQ(insn->offset, bottom_block->start_offset); 270 // Scan the "bottom" instructions, remapping them to the 271 // newly created "bottom" block. 272 MIR* p = insn; 273 p->bb = bottom_block->id; 274 while (p != bottom_block->last_mir_insn) { 275 p = p->next; 276 DCHECK(p != nullptr); 277 p->bb = bottom_block->id; 278 } 279 280 return bottom_block; 281} 282 283/* 284 * Given a code offset, find out the block that starts with it. If the offset 285 * is in the middle of an existing block, split it into two. If immed_pred_block_p 286 * is not non-null and is the block being split, update *immed_pred_block_p to 287 * point to the bottom block so that outgoing edges can be set up properly 288 * (by the caller) 289 * Utilizes a map for fast lookup of the typical cases. 290 */ 291BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create, 292 BasicBlock** immed_pred_block_p, 293 ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { 294 if (UNLIKELY(code_offset >= current_code_item_->insns_size_in_code_units_)) { 295 // There can be a fall-through out of the method code. We shall record such a block 296 // here (assuming create==true) and check that it's dead at the end of InlineMethod(). 297 // Though we're only aware of the cases where code_offset is exactly the same as 298 // insns_size_in_code_units_, treat greater code_offset the same just in case. 299 code_offset = current_code_item_->insns_size_in_code_units_; 300 } 301 302 int block_id = (*dex_pc_to_block_map)[code_offset]; 303 BasicBlock* bb = GetBasicBlock(block_id); 304 305 if ((bb != nullptr) && (bb->start_offset == code_offset)) { 306 // Does this containing block start with the desired instruction? 307 return bb; 308 } 309 310 // No direct hit. 311 if (!create) { 312 return nullptr; 313 } 314 315 if (bb != nullptr) { 316 // The target exists somewhere in an existing block. 317 BasicBlock* bottom_block = SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr); 318 DCHECK(bottom_block != nullptr); 319 MIR* p = bottom_block->first_mir_insn; 320 BasicBlock* orig_block = bb; 321 DCHECK_EQ((*dex_pc_to_block_map)[p->offset], orig_block->id); 322 // Scan the "bottom" instructions, remapping them to the 323 // newly created "bottom" block. 324 (*dex_pc_to_block_map)[p->offset] = bottom_block->id; 325 while (p != bottom_block->last_mir_insn) { 326 p = p->next; 327 DCHECK(p != nullptr); 328 int opcode = p->dalvikInsn.opcode; 329 /* 330 * Some messiness here to ensure that we only enter real opcodes and only the 331 * first half of a potentially throwing instruction that has been split into 332 * CHECK and work portions. Since the 2nd half of a split operation is always 333 * the first in a BasicBlock, we can't hit it here. 334 */ 335 if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) { 336 BasicBlockId mapped_id = (*dex_pc_to_block_map)[p->offset]; 337 // At first glance the instructions should all be mapped to orig_block. 338 // However, multiple instructions may correspond to the same dex, hence an earlier 339 // instruction may have already moved the mapping for dex to bottom_block. 340 DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id)); 341 (*dex_pc_to_block_map)[p->offset] = bottom_block->id; 342 } 343 } 344 return bottom_block; 345 } 346 347 // Create a new block. 348 bb = CreateNewBB(kDalvikByteCode); 349 bb->start_offset = code_offset; 350 (*dex_pc_to_block_map)[bb->start_offset] = bb->id; 351 return bb; 352} 353 354 355/* Identify code range in try blocks and set up the empty catch blocks */ 356void MIRGraph::ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { 357 int tries_size = current_code_item_->tries_size_; 358 DexOffset offset; 359 360 if (tries_size == 0) { 361 return; 362 } 363 364 for (int i = 0; i < tries_size; i++) { 365 const DexFile::TryItem* pTry = 366 DexFile::GetTryItems(*current_code_item_, i); 367 DexOffset start_offset = pTry->start_addr_; 368 DexOffset end_offset = start_offset + pTry->insn_count_; 369 for (offset = start_offset; offset < end_offset; offset++) { 370 try_block_addr_->SetBit(offset); 371 } 372 } 373 374 // Iterate over each of the handlers to enqueue the empty Catch blocks. 375 const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0); 376 uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr); 377 for (uint32_t idx = 0; idx < handlers_size; idx++) { 378 CatchHandlerIterator iterator(handlers_ptr); 379 for (; iterator.HasNext(); iterator.Next()) { 380 uint32_t address = iterator.GetHandlerAddress(); 381 FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map); 382 } 383 handlers_ptr = iterator.EndDataPointer(); 384 } 385} 386 387bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, 388 NarrowDexOffset catch_offset) { 389 // Catches for monitor-exit during stack unwinding have the pattern 390 // move-exception (move)* (goto)? monitor-exit throw 391 // In the currently generated dex bytecode we see these catching a bytecode range including 392 // either its own or an identical monitor-exit, http://b/15745363 . This function checks if 393 // it's the case for a given monitor-exit and catch block so that we can ignore it. 394 // (We don't want to ignore all monitor-exit catches since one could enclose a synchronized 395 // block in a try-block and catch the NPE, Error or Throwable and we should let it through; 396 // even though a throwing monitor-exit certainly indicates a bytecode error.) 397 const Instruction* monitor_exit = Instruction::At(current_code_item_->insns_ + monitor_exit_offset); 398 DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT); 399 int monitor_reg = monitor_exit->VRegA_11x(); 400 const Instruction* check_insn = Instruction::At(current_code_item_->insns_ + catch_offset); 401 DCHECK(check_insn->Opcode() == Instruction::MOVE_EXCEPTION); 402 if (check_insn->VRegA_11x() == monitor_reg) { 403 // Unexpected move-exception to the same register. Probably not the pattern we're looking for. 404 return false; 405 } 406 check_insn = check_insn->Next(); 407 while (true) { 408 int dest = -1; 409 bool wide = false; 410 switch (check_insn->Opcode()) { 411 case Instruction::MOVE_WIDE: 412 wide = true; 413 FALLTHROUGH_INTENDED; 414 case Instruction::MOVE_OBJECT: 415 case Instruction::MOVE: 416 dest = check_insn->VRegA_12x(); 417 break; 418 419 case Instruction::MOVE_WIDE_FROM16: 420 wide = true; 421 FALLTHROUGH_INTENDED; 422 case Instruction::MOVE_OBJECT_FROM16: 423 case Instruction::MOVE_FROM16: 424 dest = check_insn->VRegA_22x(); 425 break; 426 427 case Instruction::MOVE_WIDE_16: 428 wide = true; 429 FALLTHROUGH_INTENDED; 430 case Instruction::MOVE_OBJECT_16: 431 case Instruction::MOVE_16: 432 dest = check_insn->VRegA_32x(); 433 break; 434 435 case Instruction::GOTO: 436 case Instruction::GOTO_16: 437 case Instruction::GOTO_32: 438 check_insn = check_insn->RelativeAt(check_insn->GetTargetOffset()); 439 FALLTHROUGH_INTENDED; 440 default: 441 return check_insn->Opcode() == Instruction::MONITOR_EXIT && 442 check_insn->VRegA_11x() == monitor_reg; 443 } 444 445 if (dest == monitor_reg || (wide && dest + 1 == monitor_reg)) { 446 return false; 447 } 448 449 check_insn = check_insn->Next(); 450 } 451} 452 453/* Process instructions with the kBranch flag */ 454BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, 455 int width, int flags, const uint16_t* code_ptr, 456 const uint16_t* code_end, 457 ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { 458 DexOffset target = cur_offset; 459 switch (insn->dalvikInsn.opcode) { 460 case Instruction::GOTO: 461 case Instruction::GOTO_16: 462 case Instruction::GOTO_32: 463 target += insn->dalvikInsn.vA; 464 break; 465 case Instruction::IF_EQ: 466 case Instruction::IF_NE: 467 case Instruction::IF_LT: 468 case Instruction::IF_GE: 469 case Instruction::IF_GT: 470 case Instruction::IF_LE: 471 cur_block->conditional_branch = true; 472 target += insn->dalvikInsn.vC; 473 break; 474 case Instruction::IF_EQZ: 475 case Instruction::IF_NEZ: 476 case Instruction::IF_LTZ: 477 case Instruction::IF_GEZ: 478 case Instruction::IF_GTZ: 479 case Instruction::IF_LEZ: 480 cur_block->conditional_branch = true; 481 target += insn->dalvikInsn.vB; 482 break; 483 default: 484 LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set"; 485 } 486 CountBranch(target); 487 BasicBlock* taken_block = FindBlock(target, /* create */ true, 488 /* immed_pred_block_p */ &cur_block, 489 dex_pc_to_block_map); 490 DCHECK(taken_block != nullptr); 491 cur_block->taken = taken_block->id; 492 taken_block->predecessors.push_back(cur_block->id); 493 494 /* Always terminate the current block for conditional branches */ 495 if (flags & Instruction::kContinue) { 496 BasicBlock* fallthrough_block = FindBlock(cur_offset + width, 497 /* create */ 498 true, 499 /* immed_pred_block_p */ 500 &cur_block, 501 dex_pc_to_block_map); 502 DCHECK(fallthrough_block != nullptr); 503 cur_block->fall_through = fallthrough_block->id; 504 fallthrough_block->predecessors.push_back(cur_block->id); 505 } else if (code_ptr < code_end) { 506 FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map); 507 } 508 return cur_block; 509} 510 511/* Process instructions with the kSwitch flag */ 512BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, 513 int width, int flags, 514 ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { 515 UNUSED(flags); 516 const uint16_t* switch_data = 517 reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + 518 static_cast<int32_t>(insn->dalvikInsn.vB)); 519 int size; 520 const int* keyTable; 521 const int* target_table; 522 int i; 523 int first_key; 524 525 /* 526 * Packed switch data format: 527 * ushort ident = 0x0100 magic value 528 * ushort size number of entries in the table 529 * int first_key first (and lowest) switch case value 530 * int targets[size] branch targets, relative to switch opcode 531 * 532 * Total size is (4+size*2) 16-bit code units. 533 */ 534 if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) { 535 DCHECK_EQ(static_cast<int>(switch_data[0]), 536 static_cast<int>(Instruction::kPackedSwitchSignature)); 537 size = switch_data[1]; 538 first_key = switch_data[2] | (switch_data[3] << 16); 539 target_table = reinterpret_cast<const int*>(&switch_data[4]); 540 keyTable = nullptr; // Make the compiler happy. 541 /* 542 * Sparse switch data format: 543 * ushort ident = 0x0200 magic value 544 * ushort size number of entries in the table; > 0 545 * int keys[size] keys, sorted low-to-high; 32-bit aligned 546 * int targets[size] branch targets, relative to switch opcode 547 * 548 * Total size is (2+size*4) 16-bit code units. 549 */ 550 } else { 551 DCHECK_EQ(static_cast<int>(switch_data[0]), 552 static_cast<int>(Instruction::kSparseSwitchSignature)); 553 size = switch_data[1]; 554 keyTable = reinterpret_cast<const int*>(&switch_data[2]); 555 target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]); 556 first_key = 0; // To make the compiler happy. 557 } 558 559 if (cur_block->successor_block_list_type != kNotUsed) { 560 LOG(FATAL) << "Successor block list already in use: " 561 << static_cast<int>(cur_block->successor_block_list_type); 562 } 563 cur_block->successor_block_list_type = 564 (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? kPackedSwitch : kSparseSwitch; 565 cur_block->successor_blocks.reserve(size); 566 567 for (i = 0; i < size; i++) { 568 BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true, 569 /* immed_pred_block_p */ &cur_block, 570 dex_pc_to_block_map); 571 DCHECK(case_block != nullptr); 572 SuccessorBlockInfo* successor_block_info = 573 static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo), 574 kArenaAllocSuccessor)); 575 successor_block_info->block = case_block->id; 576 successor_block_info->key = 577 (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? 578 first_key + i : keyTable[i]; 579 cur_block->successor_blocks.push_back(successor_block_info); 580 case_block->predecessors.push_back(cur_block->id); 581 } 582 583 /* Fall-through case */ 584 BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true, 585 /* immed_pred_block_p */ nullptr, 586 dex_pc_to_block_map); 587 DCHECK(fallthrough_block != nullptr); 588 cur_block->fall_through = fallthrough_block->id; 589 fallthrough_block->predecessors.push_back(cur_block->id); 590 return cur_block; 591} 592 593/* Process instructions with the kThrow flag */ 594BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, 595 int width, int flags, ArenaBitVector* try_block_addr, 596 const uint16_t* code_ptr, const uint16_t* code_end, 597 ScopedArenaVector<uint16_t>* dex_pc_to_block_map) { 598 UNUSED(flags); 599 bool in_try_block = try_block_addr->IsBitSet(cur_offset); 600 bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW); 601 602 /* In try block */ 603 if (in_try_block) { 604 CatchHandlerIterator iterator(*current_code_item_, cur_offset); 605 606 if (cur_block->successor_block_list_type != kNotUsed) { 607 LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file); 608 LOG(FATAL) << "Successor block list already in use: " 609 << static_cast<int>(cur_block->successor_block_list_type); 610 } 611 612 for (; iterator.HasNext(); iterator.Next()) { 613 BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */, 614 nullptr /* immed_pred_block_p */, 615 dex_pc_to_block_map); 616 if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT && 617 IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) { 618 // Don't allow monitor-exit to catch its own exception, http://b/15745363 . 619 continue; 620 } 621 if (cur_block->successor_block_list_type == kNotUsed) { 622 cur_block->successor_block_list_type = kCatch; 623 } 624 catch_block->catch_entry = true; 625 if (kIsDebugBuild) { 626 catches_.insert(catch_block->start_offset); 627 } 628 SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*> 629 (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor)); 630 successor_block_info->block = catch_block->id; 631 successor_block_info->key = iterator.GetHandlerTypeIndex(); 632 cur_block->successor_blocks.push_back(successor_block_info); 633 catch_block->predecessors.push_back(cur_block->id); 634 } 635 in_try_block = (cur_block->successor_block_list_type != kNotUsed); 636 } 637 bool build_all_edges = 638 (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block; 639 if (!in_try_block && build_all_edges) { 640 BasicBlock* eh_block = CreateNewBB(kExceptionHandling); 641 cur_block->taken = eh_block->id; 642 eh_block->start_offset = cur_offset; 643 eh_block->predecessors.push_back(cur_block->id); 644 } 645 646 if (is_throw) { 647 cur_block->explicit_throw = true; 648 if (code_ptr < code_end) { 649 // Force creation of new block following THROW via side-effect. 650 FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map); 651 } 652 if (!in_try_block) { 653 // Don't split a THROW that can't rethrow - we're done. 654 return cur_block; 655 } 656 } 657 658 if (!build_all_edges) { 659 /* 660 * Even though there is an exception edge here, control cannot return to this 661 * method. Thus, for the purposes of dataflow analysis and optimization, we can 662 * ignore the edge. Doing this reduces compile time, and increases the scope 663 * of the basic-block level optimization pass. 664 */ 665 return cur_block; 666 } 667 668 /* 669 * Split the potentially-throwing instruction into two parts. 670 * The first half will be a pseudo-op that captures the exception 671 * edges and terminates the basic block. It always falls through. 672 * Then, create a new basic block that begins with the throwing instruction 673 * (minus exceptions). Note: this new basic block must NOT be entered into 674 * the block_map. If the potentially-throwing instruction is the target of a 675 * future branch, we need to find the check psuedo half. The new 676 * basic block containing the work portion of the instruction should 677 * only be entered via fallthrough from the block containing the 678 * pseudo exception edge MIR. Note also that this new block is 679 * not automatically terminated after the work portion, and may 680 * contain following instructions. 681 * 682 * Note also that the dex_pc_to_block_map entry for the potentially 683 * throwing instruction will refer to the original basic block. 684 */ 685 BasicBlock* new_block = CreateNewBB(kDalvikByteCode); 686 new_block->start_offset = insn->offset; 687 cur_block->fall_through = new_block->id; 688 new_block->predecessors.push_back(cur_block->id); 689 MIR* new_insn = NewMIR(); 690 *new_insn = *insn; 691 insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck); 692 // Associate the two halves. 693 insn->meta.throw_insn = new_insn; 694 new_block->AppendMIR(new_insn); 695 return new_block; 696} 697 698/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */ 699void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, 700 InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx, 701 uint32_t method_idx, jobject class_loader, const DexFile& dex_file) { 702 current_code_item_ = code_item; 703 method_stack_.push_back(std::make_pair(current_method_, current_offset_)); 704 current_method_ = m_units_.size(); 705 current_offset_ = 0; 706 // TODO: will need to snapshot stack image and use that as the mir context identification. 707 m_units_.push_back(new (arena_) DexCompilationUnit( 708 cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file, 709 current_code_item_, class_def_idx, method_idx, access_flags, 710 cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx))); 711 const uint16_t* code_ptr = current_code_item_->insns_; 712 const uint16_t* code_end = 713 current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_; 714 715 // TODO: need to rework expansion of block list & try_block_addr when inlining activated. 716 // TUNING: use better estimate of basic blocks for following resize. 717 block_list_.reserve(block_list_.size() + current_code_item_->insns_size_in_code_units_); 718 // FindBlock lookup cache. 719 ScopedArenaAllocator allocator(&cu_->arena_stack); 720 ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter()); 721 dex_pc_to_block_map.resize(current_code_item_->insns_size_in_code_units_ + 722 1 /* Fall-through on last insn; dead or punt to interpreter. */); 723 724 // TODO: replace with explicit resize routine. Using automatic extension side effect for now. 725 try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_); 726 try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_); 727 728 // If this is the first method, set up default entry and exit blocks. 729 if (current_method_ == 0) { 730 DCHECK(entry_block_ == nullptr); 731 DCHECK(exit_block_ == nullptr); 732 DCHECK_EQ(GetNumBlocks(), 0U); 733 // Use id 0 to represent a null block. 734 BasicBlock* null_block = CreateNewBB(kNullBlock); 735 DCHECK_EQ(null_block->id, NullBasicBlockId); 736 null_block->hidden = true; 737 entry_block_ = CreateNewBB(kEntryBlock); 738 exit_block_ = CreateNewBB(kExitBlock); 739 } else { 740 UNIMPLEMENTED(FATAL) << "Nested inlining not implemented."; 741 /* 742 * Will need to manage storage for ins & outs, push prevous state and update 743 * insert point. 744 */ 745 } 746 747 /* Current block to record parsed instructions */ 748 BasicBlock* cur_block = CreateNewBB(kDalvikByteCode); 749 DCHECK_EQ(current_offset_, 0U); 750 cur_block->start_offset = current_offset_; 751 // TODO: for inlining support, insert at the insert point rather than entry block. 752 entry_block_->fall_through = cur_block->id; 753 cur_block->predecessors.push_back(entry_block_->id); 754 755 /* Identify code range in try blocks and set up the empty catch blocks */ 756 ProcessTryCatchBlocks(&dex_pc_to_block_map); 757 758 uint64_t merged_df_flags = 0u; 759 760 /* Parse all instructions and put them into containing basic blocks */ 761 while (code_ptr < code_end) { 762 MIR *insn = NewMIR(); 763 insn->offset = current_offset_; 764 insn->m_unit_index = current_method_; 765 int width = ParseInsn(code_ptr, &insn->dalvikInsn); 766 Instruction::Code opcode = insn->dalvikInsn.opcode; 767 if (opcode_count_ != nullptr) { 768 opcode_count_[static_cast<int>(opcode)]++; 769 } 770 771 int flags = insn->dalvikInsn.FlagsOf(); 772 int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode); 773 774 uint64_t df_flags = GetDataFlowAttributes(insn); 775 merged_df_flags |= df_flags; 776 777 if (df_flags & DF_HAS_DEFS) { 778 def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1; 779 } 780 781 if (df_flags & DF_LVN) { 782 cur_block->use_lvn = true; // Run local value numbering on this basic block. 783 } 784 785 // Check for inline data block signatures. 786 if (opcode == Instruction::NOP) { 787 // A simple NOP will have a width of 1 at this point, embedded data NOP > 1. 788 if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) { 789 // Could be an aligning nop. If an embedded data NOP follows, treat pair as single unit. 790 uint16_t following_raw_instruction = code_ptr[1]; 791 if ((following_raw_instruction == Instruction::kSparseSwitchSignature) || 792 (following_raw_instruction == Instruction::kPackedSwitchSignature) || 793 (following_raw_instruction == Instruction::kArrayDataSignature)) { 794 width += Instruction::At(code_ptr + 1)->SizeInCodeUnits(); 795 } 796 } 797 if (width == 1) { 798 // It is a simple nop - treat normally. 799 cur_block->AppendMIR(insn); 800 } else { 801 DCHECK(cur_block->fall_through == NullBasicBlockId); 802 DCHECK(cur_block->taken == NullBasicBlockId); 803 // Unreachable instruction, mark for no continuation and end basic block. 804 flags &= ~Instruction::kContinue; 805 FindBlock(current_offset_ + width, /* create */ true, 806 /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map); 807 } 808 } else { 809 cur_block->AppendMIR(insn); 810 } 811 812 // Associate the starting dex_pc for this opcode with its containing basic block. 813 dex_pc_to_block_map[insn->offset] = cur_block->id; 814 815 code_ptr += width; 816 817 if (flags & Instruction::kBranch) { 818 cur_block = ProcessCanBranch(cur_block, insn, current_offset_, 819 width, flags, code_ptr, code_end, &dex_pc_to_block_map); 820 } else if (flags & Instruction::kReturn) { 821 cur_block->terminated_by_return = true; 822 cur_block->fall_through = exit_block_->id; 823 exit_block_->predecessors.push_back(cur_block->id); 824 /* 825 * Terminate the current block if there are instructions 826 * afterwards. 827 */ 828 if (code_ptr < code_end) { 829 /* 830 * Create a fallthrough block for real instructions 831 * (incl. NOP). 832 */ 833 FindBlock(current_offset_ + width, /* create */ true, 834 /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map); 835 } 836 } else if (flags & Instruction::kThrow) { 837 cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_, 838 code_ptr, code_end, &dex_pc_to_block_map); 839 } else if (flags & Instruction::kSwitch) { 840 cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, 841 flags, &dex_pc_to_block_map); 842 } 843 if (verify_flags & Instruction::kVerifyVarArgRange || 844 verify_flags & Instruction::kVerifyVarArgRangeNonZero) { 845 /* 846 * The Quick backend's runtime model includes a gap between a method's 847 * argument ("in") vregs and the rest of its vregs. Handling a range instruction 848 * which spans the gap is somewhat complicated, and should not happen 849 * in normal usage of dx. Punt to the interpreter. 850 */ 851 int first_reg_in_range = insn->dalvikInsn.vC; 852 int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1; 853 if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) { 854 punt_to_interpreter_ = true; 855 } 856 } 857 current_offset_ += width; 858 BasicBlock* next_block = FindBlock(current_offset_, /* create */ false, 859 /* immed_pred_block_p */ nullptr, 860 &dex_pc_to_block_map); 861 if (next_block) { 862 /* 863 * The next instruction could be the target of a previously parsed 864 * forward branch so a block is already created. If the current 865 * instruction is not an unconditional branch, connect them through 866 * the fall-through link. 867 */ 868 DCHECK(cur_block->fall_through == NullBasicBlockId || 869 GetBasicBlock(cur_block->fall_through) == next_block || 870 GetBasicBlock(cur_block->fall_through) == exit_block_); 871 872 if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) { 873 cur_block->fall_through = next_block->id; 874 next_block->predecessors.push_back(cur_block->id); 875 } 876 cur_block = next_block; 877 } 878 } 879 merged_df_flags_ = merged_df_flags; 880 881 if (cu_->enable_debug & (1 << kDebugDumpCFG)) { 882 DumpCFG("/sdcard/1_post_parse_cfg/", true); 883 } 884 885 if (cu_->verbose) { 886 DumpMIRGraph(); 887 } 888 889 // Check if there's been a fall-through out of the method code. 890 BasicBlockId out_bb_id = dex_pc_to_block_map[current_code_item_->insns_size_in_code_units_]; 891 if (UNLIKELY(out_bb_id != NullBasicBlockId)) { 892 // Eagerly calculate DFS order to determine if the block is dead. 893 DCHECK(!DfsOrdersUpToDate()); 894 ComputeDFSOrders(); 895 BasicBlock* out_bb = GetBasicBlock(out_bb_id); 896 DCHECK(out_bb != nullptr); 897 if (out_bb->block_type != kDead) { 898 LOG(WARNING) << "Live fall-through out of method in " << PrettyMethod(method_idx, dex_file); 899 SetPuntToInterpreter(true); 900 } 901 } 902} 903 904void MIRGraph::ShowOpcodeStats() { 905 DCHECK(opcode_count_ != nullptr); 906 LOG(INFO) << "Opcode Count"; 907 for (int i = 0; i < kNumPackedOpcodes; i++) { 908 if (opcode_count_[i] != 0) { 909 LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i)) 910 << " " << opcode_count_[i]; 911 } 912 } 913} 914 915uint64_t MIRGraph::GetDataFlowAttributes(Instruction::Code opcode) { 916 DCHECK_LT((size_t) opcode, (sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0]))); 917 return oat_data_flow_attributes_[opcode]; 918} 919 920uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) { 921 DCHECK(mir != nullptr); 922 Instruction::Code opcode = mir->dalvikInsn.opcode; 923 return GetDataFlowAttributes(opcode); 924} 925 926// The path can easily surpass FS limits because of parameters etc. Use pathconf to get FS 927// restrictions here. Note that a successful invocation will return an actual value. If the path 928// is too long for some reason, the return will be ENAMETOOLONG. Then cut off part of the name. 929// 930// It's possible the path is not valid, or some other errors appear. In that case return false. 931static bool CreateDumpFile(std::string& fname, const char* dir_prefix, NarrowDexOffset start_offset, 932 const char *suffix, int nr, std::string* output) { 933 std::string dir = StringPrintf("./%s", dir_prefix); 934 int64_t max_name_length = pathconf(dir.c_str(), _PC_NAME_MAX); 935 if (max_name_length <= 0) { 936 PLOG(ERROR) << "Could not get file name restrictions for " << dir; 937 return false; 938 } 939 940 std::string name = StringPrintf("%s%x%s_%d.dot", fname.c_str(), start_offset, 941 suffix == nullptr ? "" : suffix, nr); 942 std::string fpath; 943 if (static_cast<int64_t>(name.size()) > max_name_length) { 944 std::string suffix_str = StringPrintf("_%d.dot", nr); 945 name = name.substr(0, static_cast<size_t>(max_name_length) - suffix_str.size()) + suffix_str; 946 } 947 // Sanity check. 948 DCHECK_LE(name.size(), static_cast<size_t>(max_name_length)); 949 950 *output = StringPrintf("%s%s", dir_prefix, name.c_str()); 951 return true; 952} 953 954// TODO: use a configurable base prefix, and adjust callers to supply pass name. 955/* Dump the CFG into a DOT graph */ 956void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suffix) { 957 FILE* file; 958 static AtomicInteger cnt(0); 959 960 // Increment counter to get a unique file number. 961 cnt++; 962 int nr = cnt.LoadRelaxed(); 963 964 std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file)); 965 ReplaceSpecialChars(fname); 966 std::string fpath; 967 if (!CreateDumpFile(fname, dir_prefix, GetBasicBlock(GetEntryBlock()->fall_through)->start_offset, 968 suffix, nr, &fpath)) { 969 LOG(ERROR) << "Could not create dump file name for " << fname; 970 return; 971 } 972 file = fopen(fpath.c_str(), "w"); 973 if (file == nullptr) { 974 PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG."; 975 return; 976 } 977 fprintf(file, "digraph G {\n"); 978 979 fprintf(file, " rankdir=TB\n"); 980 981 int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_; 982 int idx; 983 984 for (idx = 0; idx < num_blocks; idx++) { 985 int block_idx = all_blocks ? idx : dfs_order_[idx]; 986 BasicBlock* bb = GetBasicBlock(block_idx); 987 if (bb == nullptr) continue; 988 if (bb->block_type == kDead) continue; 989 if (bb->hidden) continue; 990 if (bb->block_type == kEntryBlock) { 991 fprintf(file, " entry_%d [shape=Mdiamond];\n", bb->id); 992 } else if (bb->block_type == kExitBlock) { 993 fprintf(file, " exit_%d [shape=Mdiamond];\n", bb->id); 994 } else if (bb->block_type == kDalvikByteCode) { 995 fprintf(file, " block%04x_%d [shape=record,label = \"{ \\\n", 996 bb->start_offset, bb->id); 997 const MIR* mir; 998 fprintf(file, " {block id %d\\l}%s\\\n", bb->id, 999 bb->first_mir_insn ? " | " : " "); 1000 for (mir = bb->first_mir_insn; mir; mir = mir->next) { 1001 int opcode = mir->dalvikInsn.opcode; 1002 fprintf(file, " {%04x %s %s %s %s %s %s %s %s %s\\l}%s\\\n", mir->offset, 1003 mir->ssa_rep ? GetDalvikDisassembly(mir) : 1004 !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ? 1005 Instruction::Name(mir->dalvikInsn.opcode) : 1006 extended_mir_op_names_[opcode - kMirOpFirst], 1007 (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ", 1008 (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ", 1009 (mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ", 1010 (mir->optimization_flags & MIR_STORE_NON_TEMPORAL) != 0 ? " non_temporal" : " ", 1011 (mir->optimization_flags & MIR_CALLEE) != 0 ? " inlined" : " ", 1012 (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0 ? " cl_inited" : " ", 1013 (mir->optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0 ? " cl_in_cache" : " ", 1014 (mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) != 0 ? " no_div_check" : " ", 1015 mir->next ? " | " : " "); 1016 } 1017 fprintf(file, " }\"];\n\n"); 1018 } else if (bb->block_type == kExceptionHandling) { 1019 char block_name[BLOCK_NAME_LEN]; 1020 1021 GetBlockName(bb, block_name); 1022 fprintf(file, " %s [shape=invhouse];\n", block_name); 1023 } 1024 1025 char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN]; 1026 1027 if (bb->taken != NullBasicBlockId) { 1028 GetBlockName(bb, block_name1); 1029 GetBlockName(GetBasicBlock(bb->taken), block_name2); 1030 fprintf(file, " %s:s -> %s:n [style=dotted]\n", 1031 block_name1, block_name2); 1032 } 1033 if (bb->fall_through != NullBasicBlockId) { 1034 GetBlockName(bb, block_name1); 1035 GetBlockName(GetBasicBlock(bb->fall_through), block_name2); 1036 fprintf(file, " %s:s -> %s:n\n", block_name1, block_name2); 1037 } 1038 1039 if (bb->successor_block_list_type != kNotUsed) { 1040 fprintf(file, " succ%04x_%d [shape=%s,label = \"{ \\\n", 1041 bb->start_offset, bb->id, 1042 (bb->successor_block_list_type == kCatch) ? "Mrecord" : "record"); 1043 1044 int last_succ_id = static_cast<int>(bb->successor_blocks.size() - 1u); 1045 int succ_id = 0; 1046 for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) { 1047 BasicBlock* dest_block = GetBasicBlock(successor_block_info->block); 1048 fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n", 1049 succ_id, 1050 successor_block_info->key, 1051 dest_block->start_offset, 1052 (succ_id != last_succ_id) ? " | " : " "); 1053 ++succ_id; 1054 } 1055 fprintf(file, " }\"];\n\n"); 1056 1057 GetBlockName(bb, block_name1); 1058 fprintf(file, " %s:s -> succ%04x_%d:n [style=dashed]\n", 1059 block_name1, bb->start_offset, bb->id); 1060 1061 // Link the successor pseudo-block with all of its potential targets. 1062 succ_id = 0; 1063 for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) { 1064 BasicBlock* dest_block = GetBasicBlock(successor_block_info->block); 1065 1066 GetBlockName(dest_block, block_name2); 1067 fprintf(file, " succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset, 1068 bb->id, succ_id++, block_name2); 1069 } 1070 } 1071 fprintf(file, "\n"); 1072 1073 if (cu_->verbose) { 1074 /* Display the dominator tree */ 1075 GetBlockName(bb, block_name1); 1076 fprintf(file, " cfg%s [label=\"%s\", shape=none];\n", 1077 block_name1, block_name1); 1078 if (bb->i_dom) { 1079 GetBlockName(GetBasicBlock(bb->i_dom), block_name2); 1080 fprintf(file, " cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1); 1081 } 1082 } 1083 } 1084 fprintf(file, "}\n"); 1085 fclose(file); 1086} 1087 1088/* Insert an MIR instruction to the end of a basic block. */ 1089void BasicBlock::AppendMIR(MIR* mir) { 1090 // Insert it after the last MIR. 1091 InsertMIRListAfter(last_mir_insn, mir, mir); 1092} 1093 1094void BasicBlock::AppendMIRList(MIR* first_list_mir, MIR* last_list_mir) { 1095 // Insert it after the last MIR. 1096 InsertMIRListAfter(last_mir_insn, first_list_mir, last_list_mir); 1097} 1098 1099void BasicBlock::AppendMIRList(const std::vector<MIR*>& insns) { 1100 for (std::vector<MIR*>::const_iterator it = insns.begin(); it != insns.end(); it++) { 1101 MIR* new_mir = *it; 1102 1103 // Add a copy of each MIR. 1104 InsertMIRListAfter(last_mir_insn, new_mir, new_mir); 1105 } 1106} 1107 1108/* Insert a MIR instruction after the specified MIR. */ 1109void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) { 1110 InsertMIRListAfter(current_mir, new_mir, new_mir); 1111} 1112 1113void BasicBlock::InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir) { 1114 // If no MIR, we are done. 1115 if (first_list_mir == nullptr || last_list_mir == nullptr) { 1116 return; 1117 } 1118 1119 // If insert_after is null, assume BB is empty. 1120 if (insert_after == nullptr) { 1121 first_mir_insn = first_list_mir; 1122 last_mir_insn = last_list_mir; 1123 last_list_mir->next = nullptr; 1124 } else { 1125 MIR* after_list = insert_after->next; 1126 insert_after->next = first_list_mir; 1127 last_list_mir->next = after_list; 1128 if (after_list == nullptr) { 1129 last_mir_insn = last_list_mir; 1130 } 1131 } 1132 1133 // Set this BB to be the basic block of the MIRs. 1134 MIR* last = last_list_mir->next; 1135 for (MIR* mir = first_list_mir; mir != last; mir = mir->next) { 1136 mir->bb = id; 1137 } 1138} 1139 1140/* Insert an MIR instruction to the head of a basic block. */ 1141void BasicBlock::PrependMIR(MIR* mir) { 1142 InsertMIRListBefore(first_mir_insn, mir, mir); 1143} 1144 1145void BasicBlock::PrependMIRList(MIR* first_list_mir, MIR* last_list_mir) { 1146 // Insert it before the first MIR. 1147 InsertMIRListBefore(first_mir_insn, first_list_mir, last_list_mir); 1148} 1149 1150void BasicBlock::PrependMIRList(const std::vector<MIR*>& to_add) { 1151 for (std::vector<MIR*>::const_iterator it = to_add.begin(); it != to_add.end(); it++) { 1152 MIR* mir = *it; 1153 1154 InsertMIRListBefore(first_mir_insn, mir, mir); 1155 } 1156} 1157 1158/* Insert a MIR instruction before the specified MIR. */ 1159void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) { 1160 // Insert as a single element list. 1161 return InsertMIRListBefore(current_mir, new_mir, new_mir); 1162} 1163 1164MIR* BasicBlock::FindPreviousMIR(MIR* mir) { 1165 MIR* current = first_mir_insn; 1166 1167 while (current != nullptr) { 1168 MIR* next = current->next; 1169 1170 if (next == mir) { 1171 return current; 1172 } 1173 1174 current = next; 1175 } 1176 1177 return nullptr; 1178} 1179 1180void BasicBlock::InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir) { 1181 // If no MIR, we are done. 1182 if (first_list_mir == nullptr || last_list_mir == nullptr) { 1183 return; 1184 } 1185 1186 // If insert_before is null, assume BB is empty. 1187 if (insert_before == nullptr) { 1188 first_mir_insn = first_list_mir; 1189 last_mir_insn = last_list_mir; 1190 last_list_mir->next = nullptr; 1191 } else { 1192 if (first_mir_insn == insert_before) { 1193 last_list_mir->next = first_mir_insn; 1194 first_mir_insn = first_list_mir; 1195 } else { 1196 // Find the preceding MIR. 1197 MIR* before_list = FindPreviousMIR(insert_before); 1198 DCHECK(before_list != nullptr); 1199 before_list->next = first_list_mir; 1200 last_list_mir->next = insert_before; 1201 } 1202 } 1203 1204 // Set this BB to be the basic block of the MIRs. 1205 for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) { 1206 mir->bb = id; 1207 } 1208} 1209 1210bool BasicBlock::RemoveMIR(MIR* mir) { 1211 // Remove as a single element list. 1212 return RemoveMIRList(mir, mir); 1213} 1214 1215bool BasicBlock::RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir) { 1216 if (first_list_mir == nullptr) { 1217 return false; 1218 } 1219 1220 // Try to find the MIR. 1221 MIR* before_list = nullptr; 1222 MIR* after_list = nullptr; 1223 1224 // If we are removing from the beginning of the MIR list. 1225 if (first_mir_insn == first_list_mir) { 1226 before_list = nullptr; 1227 } else { 1228 before_list = FindPreviousMIR(first_list_mir); 1229 if (before_list == nullptr) { 1230 // We did not find the mir. 1231 return false; 1232 } 1233 } 1234 1235 // Remove the BB information and also find the after_list. 1236 for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) { 1237 mir->bb = NullBasicBlockId; 1238 } 1239 1240 after_list = last_list_mir->next; 1241 1242 // If there is nothing before the list, after_list is the first_mir. 1243 if (before_list == nullptr) { 1244 first_mir_insn = after_list; 1245 } else { 1246 before_list->next = after_list; 1247 } 1248 1249 // If there is nothing after the list, before_list is last_mir. 1250 if (after_list == nullptr) { 1251 last_mir_insn = before_list; 1252 } 1253 1254 return true; 1255} 1256 1257MIR* BasicBlock::GetFirstNonPhiInsn() { 1258 MIR* mir = first_mir_insn; 1259 while (mir != nullptr && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) { 1260 mir = mir->next; 1261 } 1262 return mir; 1263} 1264 1265MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) { 1266 MIR* next_mir = nullptr; 1267 1268 if (current != nullptr) { 1269 next_mir = current->next; 1270 } 1271 1272 if (next_mir == nullptr) { 1273 // Only look for next MIR that follows unconditionally. 1274 if ((taken == NullBasicBlockId) && (fall_through != NullBasicBlockId)) { 1275 next_mir = mir_graph->GetBasicBlock(fall_through)->first_mir_insn; 1276 } 1277 } 1278 1279 return next_mir; 1280} 1281 1282static void FillTypeSizeString(uint32_t type_size, std::string* decoded_mir) { 1283 DCHECK(decoded_mir != nullptr); 1284 OpSize type = static_cast<OpSize>(type_size >> 16); 1285 uint16_t vect_size = (type_size & 0xFFFF); 1286 1287 // Now print the type and vector size. 1288 std::stringstream ss; 1289 ss << " (type:"; 1290 ss << type; 1291 ss << " vectsize:"; 1292 ss << vect_size; 1293 ss << ")"; 1294 1295 decoded_mir->append(ss.str()); 1296} 1297 1298void MIRGraph::DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir) { 1299 DCHECK(decoded_mir != nullptr); 1300 int opcode = mir->dalvikInsn.opcode; 1301 SSARepresentation* ssa_rep = mir->ssa_rep; 1302 int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0; 1303 int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0; 1304 1305 if (opcode < kMirOpFirst) { 1306 return; // It is not an extended instruction. 1307 } 1308 1309 decoded_mir->append(extended_mir_op_names_[opcode - kMirOpFirst]); 1310 1311 switch (opcode) { 1312 case kMirOpPhi: { 1313 if (defs > 0 && uses > 0) { 1314 BasicBlockId* incoming = mir->meta.phi_incoming; 1315 decoded_mir->append(StringPrintf(" %s = (%s", 1316 GetSSANameWithConst(ssa_rep->defs[0], true).c_str(), 1317 GetSSANameWithConst(ssa_rep->uses[0], true).c_str())); 1318 decoded_mir->append(StringPrintf(":%d", incoming[0])); 1319 for (int i = 1; i < uses; i++) { 1320 decoded_mir->append(StringPrintf(", %s:%d", GetSSANameWithConst(ssa_rep->uses[i], true).c_str(), incoming[i])); 1321 } 1322 decoded_mir->append(")"); 1323 } 1324 break; 1325 } 1326 case kMirOpCopy: 1327 if (ssa_rep != nullptr) { 1328 decoded_mir->append(" "); 1329 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false)); 1330 if (defs > 1) { 1331 decoded_mir->append(", "); 1332 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false)); 1333 } 1334 decoded_mir->append(" = "); 1335 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false)); 1336 if (uses > 1) { 1337 decoded_mir->append(", "); 1338 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false)); 1339 } 1340 } else { 1341 decoded_mir->append(StringPrintf(" v%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1342 } 1343 break; 1344 case kMirOpFusedCmplFloat: 1345 case kMirOpFusedCmpgFloat: 1346 case kMirOpFusedCmplDouble: 1347 case kMirOpFusedCmpgDouble: 1348 case kMirOpFusedCmpLong: 1349 if (ssa_rep != nullptr) { 1350 decoded_mir->append(" "); 1351 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false)); 1352 for (int i = 1; i < uses; i++) { 1353 decoded_mir->append(", "); 1354 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false)); 1355 } 1356 } else { 1357 decoded_mir->append(StringPrintf(" v%d, v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1358 } 1359 break; 1360 case kMirOpMoveVector: 1361 decoded_mir->append(StringPrintf(" vect%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1362 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1363 break; 1364 case kMirOpPackedAddition: 1365 decoded_mir->append(StringPrintf(" vect%d = vect%d + vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1366 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1367 break; 1368 case kMirOpPackedMultiply: 1369 decoded_mir->append(StringPrintf(" vect%d = vect%d * vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1370 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1371 break; 1372 case kMirOpPackedSubtract: 1373 decoded_mir->append(StringPrintf(" vect%d = vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1374 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1375 break; 1376 case kMirOpPackedAnd: 1377 decoded_mir->append(StringPrintf(" vect%d = vect%d & vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1378 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1379 break; 1380 case kMirOpPackedOr: 1381 decoded_mir->append(StringPrintf(" vect%d = vect%d \\| vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1382 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1383 break; 1384 case kMirOpPackedXor: 1385 decoded_mir->append(StringPrintf(" vect%d = vect%d ^ vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1386 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1387 break; 1388 case kMirOpPackedShiftLeft: 1389 decoded_mir->append(StringPrintf(" vect%d = vect%d \\<\\< %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1390 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1391 break; 1392 case kMirOpPackedUnsignedShiftRight: 1393 decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1394 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1395 break; 1396 case kMirOpPackedSignedShiftRight: 1397 decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1398 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1399 break; 1400 case kMirOpConstVector: 1401 decoded_mir->append(StringPrintf(" vect%d = %x, %x, %x, %x", mir->dalvikInsn.vA, mir->dalvikInsn.arg[0], 1402 mir->dalvikInsn.arg[1], mir->dalvikInsn.arg[2], mir->dalvikInsn.arg[3])); 1403 break; 1404 case kMirOpPackedSet: 1405 if (ssa_rep != nullptr) { 1406 decoded_mir->append(StringPrintf(" vect%d = %s", mir->dalvikInsn.vA, 1407 GetSSANameWithConst(ssa_rep->uses[0], false).c_str())); 1408 if (uses > 1) { 1409 decoded_mir->append(", "); 1410 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false)); 1411 } 1412 } else { 1413 decoded_mir->append(StringPrintf(" vect%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1414 } 1415 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1416 break; 1417 case kMirOpPackedAddReduce: 1418 if (ssa_rep != nullptr) { 1419 decoded_mir->append(" "); 1420 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false)); 1421 if (defs > 1) { 1422 decoded_mir->append(", "); 1423 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false)); 1424 } 1425 decoded_mir->append(StringPrintf(" = vect%d + %s", mir->dalvikInsn.vB, 1426 GetSSANameWithConst(ssa_rep->uses[0], false).c_str())); 1427 if (uses > 1) { 1428 decoded_mir->append(", "); 1429 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false)); 1430 } 1431 } else { 1432 decoded_mir->append(StringPrintf("v%d = vect%d + v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB, mir->dalvikInsn.vA)); 1433 } 1434 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1435 break; 1436 case kMirOpPackedReduce: 1437 if (ssa_rep != nullptr) { 1438 decoded_mir->append(" "); 1439 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false)); 1440 if (defs > 1) { 1441 decoded_mir->append(", "); 1442 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false)); 1443 } 1444 decoded_mir->append(StringPrintf(" = vect%d (extr_idx:%d)", mir->dalvikInsn.vB, mir->dalvikInsn.arg[0])); 1445 } else { 1446 decoded_mir->append(StringPrintf(" v%d = vect%d (extr_idx:%d)", mir->dalvikInsn.vA, 1447 mir->dalvikInsn.vB, mir->dalvikInsn.arg[0])); 1448 } 1449 FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir); 1450 break; 1451 case kMirOpReserveVectorRegisters: 1452 case kMirOpReturnVectorRegisters: 1453 decoded_mir->append(StringPrintf(" vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB)); 1454 break; 1455 case kMirOpMemBarrier: { 1456 decoded_mir->append(" type:"); 1457 std::stringstream ss; 1458 ss << static_cast<MemBarrierKind>(mir->dalvikInsn.vA); 1459 decoded_mir->append(ss.str()); 1460 break; 1461 } 1462 case kMirOpPackedArrayGet: 1463 case kMirOpPackedArrayPut: 1464 decoded_mir->append(StringPrintf(" vect%d", mir->dalvikInsn.vA)); 1465 if (ssa_rep != nullptr) { 1466 decoded_mir->append(StringPrintf(", %s[%s]", 1467 GetSSANameWithConst(ssa_rep->uses[0], false).c_str(), 1468 GetSSANameWithConst(ssa_rep->uses[1], false).c_str())); 1469 } else { 1470 decoded_mir->append(StringPrintf(", v%d[v%d]", mir->dalvikInsn.vB, mir->dalvikInsn.vC)); 1471 } 1472 FillTypeSizeString(mir->dalvikInsn.arg[0], decoded_mir); 1473 break; 1474 case kMirOpMaddInt: 1475 case kMirOpMsubInt: 1476 case kMirOpMaddLong: 1477 case kMirOpMsubLong: 1478 if (ssa_rep != nullptr) { 1479 decoded_mir->append(" "); 1480 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false)); 1481 if (defs > 1) { 1482 decoded_mir->append(", "); 1483 decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false)); 1484 } 1485 for (int i = 0; i < uses; i++) { 1486 decoded_mir->append(", "); 1487 decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false)); 1488 } 1489 } else { 1490 decoded_mir->append(StringPrintf(" v%d, v%d, v%d, v%d", 1491 mir->dalvikInsn.vA, mir->dalvikInsn.vB, 1492 mir->dalvikInsn.vC, mir->dalvikInsn.arg[0])); 1493 } 1494 break; 1495 default: 1496 break; 1497 } 1498} 1499 1500char* MIRGraph::GetDalvikDisassembly(const MIR* mir) { 1501 MIR::DecodedInstruction insn = mir->dalvikInsn; 1502 std::string str; 1503 int flags = 0; 1504 int opcode = insn.opcode; 1505 char* ret; 1506 bool nop = false; 1507 SSARepresentation* ssa_rep = mir->ssa_rep; 1508 Instruction::Format dalvik_format = Instruction::k10x; // Default to no-operand format. 1509 1510 // Handle special cases that recover the original dalvik instruction. 1511 if (opcode == kMirOpCheck) { 1512 str.append(extended_mir_op_names_[opcode - kMirOpFirst]); 1513 str.append(": "); 1514 // Recover the original Dex instruction. 1515 insn = mir->meta.throw_insn->dalvikInsn; 1516 ssa_rep = mir->meta.throw_insn->ssa_rep; 1517 opcode = insn.opcode; 1518 } else if (opcode == kMirOpNop) { 1519 str.append("["); 1520 if (mir->offset < current_code_item_->insns_size_in_code_units_) { 1521 // Recover original opcode. 1522 insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode(); 1523 opcode = insn.opcode; 1524 } 1525 nop = true; 1526 } 1527 int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0; 1528 int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0; 1529 1530 if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) { 1531 // Note that this does not check the MIR's opcode in all cases. In cases where it 1532 // recovered dalvik instruction, it uses opcode of that instead of the extended one. 1533 DisassembleExtendedInstr(mir, &str); 1534 } else { 1535 dalvik_format = Instruction::FormatOf(insn.opcode); 1536 flags = insn.FlagsOf(); 1537 str.append(Instruction::Name(insn.opcode)); 1538 1539 // For invokes-style formats, treat wide regs as a pair of singles. 1540 bool show_singles = ((dalvik_format == Instruction::k35c) || 1541 (dalvik_format == Instruction::k3rc)); 1542 if (defs != 0) { 1543 str.append(" "); 1544 str.append(GetSSANameWithConst(ssa_rep->defs[0], false)); 1545 if (defs > 1) { 1546 str.append(", "); 1547 str.append(GetSSANameWithConst(ssa_rep->defs[1], false)); 1548 } 1549 if (uses != 0) { 1550 str.append(", "); 1551 } 1552 } 1553 for (int i = 0; i < uses; i++) { 1554 str.append(" "); 1555 str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles)); 1556 if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) { 1557 // For the listing, skip the high sreg. 1558 i++; 1559 } 1560 if (i != (uses - 1)) { 1561 str.append(","); 1562 } 1563 } 1564 1565 switch (dalvik_format) { 1566 case Instruction::k11n: // Add one immediate from vB. 1567 case Instruction::k21s: 1568 case Instruction::k31i: 1569 case Instruction::k21h: 1570 str.append(StringPrintf(", #0x%x", insn.vB)); 1571 break; 1572 case Instruction::k51l: // Add one wide immediate. 1573 str.append(StringPrintf(", #%" PRId64, insn.vB_wide)); 1574 break; 1575 case Instruction::k21c: // One register, one string/type/method index. 1576 case Instruction::k31c: 1577 str.append(StringPrintf(", index #0x%x", insn.vB)); 1578 break; 1579 case Instruction::k22c: // Two registers, one string/type/method index. 1580 str.append(StringPrintf(", index #0x%x", insn.vC)); 1581 break; 1582 case Instruction::k22s: // Add one immediate from vC. 1583 case Instruction::k22b: 1584 str.append(StringPrintf(", #0x%x", insn.vC)); 1585 break; 1586 default: 1587 // Nothing left to print. 1588 break; 1589 } 1590 1591 if ((flags & Instruction::kBranch) != 0) { 1592 // For branches, decode the instructions to print out the branch targets. 1593 int offset = 0; 1594 switch (dalvik_format) { 1595 case Instruction::k21t: 1596 offset = insn.vB; 1597 break; 1598 case Instruction::k22t: 1599 offset = insn.vC; 1600 break; 1601 case Instruction::k10t: 1602 case Instruction::k20t: 1603 case Instruction::k30t: 1604 offset = insn.vA; 1605 break; 1606 default: 1607 LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode; 1608 break; 1609 } 1610 str.append(StringPrintf(", 0x%x (%c%x)", mir->offset + offset, 1611 offset > 0 ? '+' : '-', offset > 0 ? offset : -offset)); 1612 } 1613 1614 if (nop) { 1615 str.append("]--optimized away"); 1616 } 1617 } 1618 int length = str.length() + 1; 1619 ret = arena_->AllocArray<char>(length, kArenaAllocDFInfo); 1620 strncpy(ret, str.c_str(), length); 1621 return ret; 1622} 1623 1624/* Turn method name into a legal Linux file name */ 1625void MIRGraph::ReplaceSpecialChars(std::string& str) { 1626 static const struct { const char before; const char after; } match[] = { 1627 {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'}, 1628 {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='} 1629 }; 1630 for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) { 1631 std::replace(str.begin(), str.end(), match[i].before, match[i].after); 1632 } 1633} 1634 1635std::string MIRGraph::GetSSAName(int ssa_reg) { 1636 // TODO: This value is needed for debugging. Currently, we compute this and then copy to the 1637 // arena. We should be smarter and just place straight into the arena, or compute the 1638 // value more lazily. 1639 int vreg = SRegToVReg(ssa_reg); 1640 if (vreg >= static_cast<int>(GetFirstTempVR())) { 1641 return StringPrintf("t%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg)); 1642 } else { 1643 return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg)); 1644 } 1645} 1646 1647// Similar to GetSSAName, but if ssa name represents an immediate show that as well. 1648std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) { 1649 if (reg_location_ == nullptr) { 1650 // Pre-SSA - just use the standard name. 1651 return GetSSAName(ssa_reg); 1652 } 1653 if (IsConst(reg_location_[ssa_reg])) { 1654 if (!singles_only && reg_location_[ssa_reg].wide && 1655 !reg_location_[ssa_reg].high_word) { 1656 return StringPrintf("v%d_%d#0x%" PRIx64, SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg), 1657 ConstantValueWide(reg_location_[ssa_reg])); 1658 } else { 1659 return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg), 1660 ConstantValue(reg_location_[ssa_reg])); 1661 } 1662 } else { 1663 int vreg = SRegToVReg(ssa_reg); 1664 if (vreg >= static_cast<int>(GetFirstTempVR())) { 1665 return StringPrintf("t%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg)); 1666 } else { 1667 return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg)); 1668 } 1669 } 1670} 1671 1672void MIRGraph::GetBlockName(BasicBlock* bb, char* name) { 1673 switch (bb->block_type) { 1674 case kEntryBlock: 1675 snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id); 1676 break; 1677 case kExitBlock: 1678 snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id); 1679 break; 1680 case kDalvikByteCode: 1681 snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id); 1682 break; 1683 case kExceptionHandling: 1684 snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset, 1685 bb->id); 1686 break; 1687 default: 1688 snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id); 1689 break; 1690 } 1691} 1692 1693const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) { 1694 const DexFile::MethodId& method_id = 1695 target_method.dex_file->GetMethodId(target_method.dex_method_index); 1696 return target_method.dex_file->GetShorty(method_id.proto_idx_); 1697} 1698 1699/* Debug Utility - dump a compilation unit */ 1700void MIRGraph::DumpMIRGraph() { 1701 const char* block_type_names[] = { 1702 "Null Block", 1703 "Entry Block", 1704 "Code Block", 1705 "Exit Block", 1706 "Exception Handling", 1707 "Catch Block" 1708 }; 1709 1710 LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 1711 LOG(INFO) << GetInsns(0) << " insns"; 1712 LOG(INFO) << GetNumBlocks() << " blocks in total"; 1713 1714 for (BasicBlock* bb : block_list_) { 1715 LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)", 1716 bb->id, 1717 block_type_names[bb->block_type], 1718 bb->start_offset, 1719 bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset, 1720 bb->last_mir_insn ? "" : " empty"); 1721 if (bb->taken != NullBasicBlockId) { 1722 LOG(INFO) << " Taken branch: block " << bb->taken 1723 << "(0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")"; 1724 } 1725 if (bb->fall_through != NullBasicBlockId) { 1726 LOG(INFO) << " Fallthrough : block " << bb->fall_through 1727 << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")"; 1728 } 1729 } 1730} 1731 1732/* 1733 * Build an array of location records for the incoming arguments. 1734 * Note: one location record per word of arguments, with dummy 1735 * high-word loc for wide arguments. Also pull up any following 1736 * MOVE_RESULT and incorporate it into the invoke. 1737 */ 1738CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) { 1739 CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo), 1740 kArenaAllocMisc)); 1741 MIR* move_result_mir = FindMoveResult(bb, mir); 1742 if (move_result_mir == nullptr) { 1743 info->result.location = kLocInvalid; 1744 } else { 1745 info->result = GetRawDest(move_result_mir); 1746 move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop); 1747 } 1748 info->num_arg_words = mir->ssa_rep->num_uses; 1749 info->args = (info->num_arg_words == 0) ? nullptr : 1750 arena_->AllocArray<RegLocation>(info->num_arg_words, kArenaAllocMisc); 1751 for (size_t i = 0; i < info->num_arg_words; i++) { 1752 info->args[i] = GetRawSrc(mir, i); 1753 } 1754 info->opt_flags = mir->optimization_flags; 1755 info->type = type; 1756 info->is_range = is_range; 1757 if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) { 1758 const auto& method_info = GetMethodLoweringInfo(mir); 1759 info->method_ref = method_info.GetTargetMethod(); 1760 } else { 1761 info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(), 1762 mir->dalvikInsn.vB); 1763 } 1764 info->index = mir->dalvikInsn.vB; 1765 info->offset = mir->offset; 1766 info->mir = mir; 1767 return info; 1768} 1769 1770// Allocate a new MIR. 1771MIR* MIRGraph::NewMIR() { 1772 MIR* mir = new (arena_) MIR(); 1773 return mir; 1774} 1775 1776// Allocate a new basic block. 1777BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) { 1778 BasicBlock* bb = new (arena_) BasicBlock(block_id, block_type, arena_); 1779 1780 // TUNING: better estimate of the exit block predecessors? 1781 bb->predecessors.reserve((block_type == kExitBlock) ? 2048 : 2); 1782 block_id_map_.Put(block_id, block_id); 1783 return bb; 1784} 1785 1786void MIRGraph::InitializeConstantPropagation() { 1787 is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false); 1788 constant_values_ = arena_->AllocArray<int>(GetNumSSARegs(), kArenaAllocDFInfo); 1789} 1790 1791void MIRGraph::InitializeMethodUses() { 1792 // The gate starts by initializing the use counts. 1793 int num_ssa_regs = GetNumSSARegs(); 1794 use_counts_.clear(); 1795 use_counts_.reserve(num_ssa_regs + 32); 1796 use_counts_.resize(num_ssa_regs, 0u); 1797 raw_use_counts_.clear(); 1798 raw_use_counts_.reserve(num_ssa_regs + 32); 1799 raw_use_counts_.resize(num_ssa_regs, 0u); 1800} 1801 1802void MIRGraph::SSATransformationStart() { 1803 DCHECK(temp_scoped_alloc_.get() == nullptr); 1804 temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack)); 1805 temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs(); 1806 temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector( 1807 temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false, kBitMapRegisterV); 1808} 1809 1810void MIRGraph::SSATransformationEnd() { 1811 // Verify the dataflow information after the pass. 1812 if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) { 1813 VerifyDataflow(); 1814 } 1815 1816 temp_.ssa.num_vregs = 0u; 1817 temp_.ssa.work_live_vregs = nullptr; 1818 DCHECK(temp_.ssa.def_block_matrix == nullptr); 1819 temp_.ssa.phi_node_blocks = nullptr; 1820 DCHECK(temp_scoped_alloc_.get() != nullptr); 1821 temp_scoped_alloc_.reset(); 1822 1823 // Update the maximum number of reachable blocks. 1824 max_num_reachable_blocks_ = num_reachable_blocks_; 1825 1826 // Mark MIR SSA representations as up to date. 1827 mir_ssa_rep_up_to_date_ = true; 1828} 1829 1830size_t MIRGraph::GetNumDalvikInsns() const { 1831 size_t cumulative_size = 0u; 1832 bool counted_current_item = false; 1833 const uint8_t size_for_null_code_item = 2u; 1834 1835 for (auto it : m_units_) { 1836 const DexFile::CodeItem* code_item = it->GetCodeItem(); 1837 // Even if the code item is null, we still count non-zero value so that 1838 // each m_unit is counted as having impact. 1839 cumulative_size += (code_item == nullptr ? 1840 size_for_null_code_item : code_item->insns_size_in_code_units_); 1841 if (code_item == current_code_item_) { 1842 counted_current_item = true; 1843 } 1844 } 1845 1846 // If the current code item was not counted yet, count it now. 1847 // This can happen for example in unit tests where some fields like m_units_ 1848 // are not initialized. 1849 if (counted_current_item == false) { 1850 cumulative_size += (current_code_item_ == nullptr ? 1851 size_for_null_code_item : current_code_item_->insns_size_in_code_units_); 1852 } 1853 1854 return cumulative_size; 1855} 1856 1857static BasicBlock* SelectTopologicalSortOrderFallBack( 1858 MIRGraph* mir_graph, const ArenaBitVector* current_loop, 1859 const ScopedArenaVector<size_t>* visited_cnt_values, ScopedArenaAllocator* allocator, 1860 ScopedArenaVector<BasicBlockId>* tmp_stack) { 1861 // No true loop head has been found but there may be true loop heads after the mess we need 1862 // to resolve. To avoid taking one of those, pick the candidate with the highest number of 1863 // reachable unvisited nodes. That candidate will surely be a part of a loop. 1864 BasicBlock* fall_back = nullptr; 1865 size_t fall_back_num_reachable = 0u; 1866 // Reuse the same bit vector for each candidate to mark reachable unvisited blocks. 1867 ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false, kBitMapMisc); 1868 AllNodesIterator iter(mir_graph); 1869 for (BasicBlock* candidate = iter.Next(); candidate != nullptr; candidate = iter.Next()) { 1870 if (candidate->hidden || // Hidden, or 1871 candidate->visited || // already processed, or 1872 (*visited_cnt_values)[candidate->id] == 0u || // no processed predecessors, or 1873 (current_loop != nullptr && // outside current loop. 1874 !current_loop->IsBitSet(candidate->id))) { 1875 continue; 1876 } 1877 DCHECK(tmp_stack->empty()); 1878 tmp_stack->push_back(candidate->id); 1879 candidate_reachable.ClearAllBits(); 1880 size_t num_reachable = 0u; 1881 while (!tmp_stack->empty()) { 1882 BasicBlockId current_id = tmp_stack->back(); 1883 tmp_stack->pop_back(); 1884 BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id); 1885 DCHECK(current_bb != nullptr); 1886 ChildBlockIterator child_iter(current_bb, mir_graph); 1887 BasicBlock* child_bb = child_iter.Next(); 1888 for ( ; child_bb != nullptr; child_bb = child_iter.Next()) { 1889 DCHECK(!child_bb->hidden); 1890 if (child_bb->visited || // Already processed, or 1891 (current_loop != nullptr && // outside current loop. 1892 !current_loop->IsBitSet(child_bb->id))) { 1893 continue; 1894 } 1895 if (!candidate_reachable.IsBitSet(child_bb->id)) { 1896 candidate_reachable.SetBit(child_bb->id); 1897 tmp_stack->push_back(child_bb->id); 1898 num_reachable += 1u; 1899 } 1900 } 1901 } 1902 if (fall_back_num_reachable < num_reachable) { 1903 fall_back_num_reachable = num_reachable; 1904 fall_back = candidate; 1905 } 1906 } 1907 return fall_back; 1908} 1909 1910// Compute from which unvisited blocks is bb_id reachable through unvisited blocks. 1911static void ComputeUnvisitedReachableFrom(MIRGraph* mir_graph, BasicBlockId bb_id, 1912 ArenaBitVector* reachable, 1913 ScopedArenaVector<BasicBlockId>* tmp_stack) { 1914 // NOTE: Loop heads indicated by the "visited" flag. 1915 DCHECK(tmp_stack->empty()); 1916 reachable->ClearAllBits(); 1917 tmp_stack->push_back(bb_id); 1918 while (!tmp_stack->empty()) { 1919 BasicBlockId current_id = tmp_stack->back(); 1920 tmp_stack->pop_back(); 1921 BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id); 1922 DCHECK(current_bb != nullptr); 1923 for (BasicBlockId pred_id : current_bb->predecessors) { 1924 BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id); 1925 DCHECK(pred_bb != nullptr); 1926 if (!pred_bb->visited && !reachable->IsBitSet(pred_bb->id)) { 1927 reachable->SetBit(pred_bb->id); 1928 tmp_stack->push_back(pred_bb->id); 1929 } 1930 } 1931 } 1932} 1933 1934void MIRGraph::ComputeTopologicalSortOrder() { 1935 ScopedArenaAllocator allocator(&cu_->arena_stack); 1936 unsigned int num_blocks = GetNumBlocks(); 1937 1938 ScopedArenaQueue<BasicBlock*> q(allocator.Adapter()); 1939 ScopedArenaVector<size_t> visited_cnt_values(num_blocks, 0u, allocator.Adapter()); 1940 ScopedArenaVector<BasicBlockId> loop_head_stack(allocator.Adapter()); 1941 size_t max_nested_loops = 0u; 1942 ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false, kBitMapMisc); 1943 loop_exit_blocks.ClearAllBits(); 1944 1945 // Count the number of blocks to process and add the entry block(s). 1946 unsigned int num_blocks_to_process = 0u; 1947 for (BasicBlock* bb : block_list_) { 1948 if (bb->hidden == true) { 1949 continue; 1950 } 1951 1952 num_blocks_to_process += 1u; 1953 1954 if (bb->predecessors.size() == 0u) { 1955 // Add entry block to the queue. 1956 q.push(bb); 1957 } 1958 } 1959 1960 // Clear the topological order arrays. 1961 topological_order_.clear(); 1962 topological_order_.reserve(num_blocks); 1963 topological_order_loop_ends_.clear(); 1964 topological_order_loop_ends_.resize(num_blocks, 0u); 1965 topological_order_indexes_.clear(); 1966 topological_order_indexes_.resize(num_blocks, static_cast<uint16_t>(-1)); 1967 1968 // Mark all blocks as unvisited. 1969 ClearAllVisitedFlags(); 1970 1971 // For loop heads, keep track from which blocks they are reachable not going through other 1972 // loop heads. Other loop heads are excluded to detect the heads of nested loops. The children 1973 // in this set go into the loop body, the other children are jumping over the loop. 1974 ScopedArenaVector<ArenaBitVector*> loop_head_reachable_from(allocator.Adapter()); 1975 loop_head_reachable_from.resize(num_blocks, nullptr); 1976 // Reuse the same temp stack whenever calculating a loop_head_reachable_from[loop_head_id]. 1977 ScopedArenaVector<BasicBlockId> tmp_stack(allocator.Adapter()); 1978 1979 while (num_blocks_to_process != 0u) { 1980 BasicBlock* bb = nullptr; 1981 if (!q.empty()) { 1982 num_blocks_to_process -= 1u; 1983 // Get top. 1984 bb = q.front(); 1985 q.pop(); 1986 if (bb->visited) { 1987 // Loop head: it was already processed, mark end and copy exit blocks to the queue. 1988 DCHECK(q.empty()) << PrettyMethod(cu_->method_idx, *cu_->dex_file); 1989 uint16_t idx = static_cast<uint16_t>(topological_order_.size()); 1990 topological_order_loop_ends_[topological_order_indexes_[bb->id]] = idx; 1991 DCHECK_EQ(loop_head_stack.back(), bb->id); 1992 loop_head_stack.pop_back(); 1993 ArenaBitVector* reachable = 1994 loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()]; 1995 for (BasicBlockId candidate_id : loop_exit_blocks.Indexes()) { 1996 if (reachable == nullptr || reachable->IsBitSet(candidate_id)) { 1997 q.push(GetBasicBlock(candidate_id)); 1998 // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again, 1999 // so clearing the bit has no effect on the iterator. 2000 loop_exit_blocks.ClearBit(candidate_id); 2001 } 2002 } 2003 continue; 2004 } 2005 } else { 2006 // Find the new loop head. 2007 AllNodesIterator iter(this); 2008 while (true) { 2009 BasicBlock* candidate = iter.Next(); 2010 if (candidate == nullptr) { 2011 // We did not find a true loop head, fall back to a reachable block in any loop. 2012 ArenaBitVector* current_loop = 2013 loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()]; 2014 bb = SelectTopologicalSortOrderFallBack(this, current_loop, &visited_cnt_values, 2015 &allocator, &tmp_stack); 2016 DCHECK(bb != nullptr) << PrettyMethod(cu_->method_idx, *cu_->dex_file); 2017 if (kIsDebugBuild && cu_->dex_file != nullptr) { 2018 LOG(INFO) << "Topological sort order: Using fall-back in " 2019 << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " BB #" << bb->id 2020 << " @0x" << std::hex << bb->start_offset 2021 << ", num_blocks = " << std::dec << num_blocks; 2022 } 2023 break; 2024 } 2025 if (candidate->hidden || // Hidden, or 2026 candidate->visited || // already processed, or 2027 visited_cnt_values[candidate->id] == 0u || // no processed predecessors, or 2028 (!loop_head_stack.empty() && // outside current loop. 2029 !loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(candidate->id))) { 2030 continue; 2031 } 2032 2033 for (BasicBlockId pred_id : candidate->predecessors) { 2034 BasicBlock* pred_bb = GetBasicBlock(pred_id); 2035 DCHECK(pred_bb != nullptr); 2036 if (pred_bb != candidate && !pred_bb->visited && 2037 !pred_bb->dominators->IsBitSet(candidate->id)) { 2038 candidate = nullptr; // Set candidate to null to indicate failure. 2039 break; 2040 } 2041 } 2042 if (candidate != nullptr) { 2043 bb = candidate; 2044 break; 2045 } 2046 } 2047 // Compute blocks from which the loop head is reachable and process those blocks first. 2048 ArenaBitVector* reachable = 2049 new (&allocator) ArenaBitVector(&allocator, num_blocks, false, kBitMapMisc); 2050 loop_head_reachable_from[bb->id] = reachable; 2051 ComputeUnvisitedReachableFrom(this, bb->id, reachable, &tmp_stack); 2052 // Now mark as loop head. (Even if it's only a fall back when we don't find a true loop.) 2053 loop_head_stack.push_back(bb->id); 2054 max_nested_loops = std::max(max_nested_loops, loop_head_stack.size()); 2055 } 2056 2057 DCHECK_EQ(bb->hidden, false); 2058 DCHECK_EQ(bb->visited, false); 2059 bb->visited = true; 2060 bb->nesting_depth = loop_head_stack.size(); 2061 2062 // Now add the basic block. 2063 uint16_t idx = static_cast<uint16_t>(topological_order_.size()); 2064 topological_order_indexes_[bb->id] = idx; 2065 topological_order_.push_back(bb->id); 2066 2067 // Update visited_cnt_values for children. 2068 ChildBlockIterator succIter(bb, this); 2069 BasicBlock* successor = succIter.Next(); 2070 for ( ; successor != nullptr; successor = succIter.Next()) { 2071 if (successor->hidden) { 2072 continue; 2073 } 2074 2075 // One more predecessor was visited. 2076 visited_cnt_values[successor->id] += 1u; 2077 if (visited_cnt_values[successor->id] == successor->predecessors.size()) { 2078 if (loop_head_stack.empty() || 2079 loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(successor->id)) { 2080 q.push(successor); 2081 } else { 2082 DCHECK(!loop_exit_blocks.IsBitSet(successor->id)); 2083 loop_exit_blocks.SetBit(successor->id); 2084 } 2085 } 2086 } 2087 } 2088 2089 // Prepare the loop head stack for iteration. 2090 topological_order_loop_head_stack_.clear(); 2091 topological_order_loop_head_stack_.reserve(max_nested_loops); 2092 max_nested_loops_ = max_nested_loops; 2093 topological_order_up_to_date_ = true; 2094} 2095 2096bool BasicBlock::IsExceptionBlock() const { 2097 if (block_type == kExceptionHandling) { 2098 return true; 2099 } 2100 return false; 2101} 2102 2103ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph) 2104 : basic_block_(bb), mir_graph_(mir_graph), visited_fallthrough_(false), 2105 visited_taken_(false), have_successors_(false) { 2106 // Check if we actually do have successors. 2107 if (basic_block_ != 0 && basic_block_->successor_block_list_type != kNotUsed) { 2108 have_successors_ = true; 2109 successor_iter_ = basic_block_->successor_blocks.cbegin(); 2110 } 2111} 2112 2113BasicBlock* ChildBlockIterator::Next() { 2114 // We check if we have a basic block. If we don't we cannot get next child. 2115 if (basic_block_ == nullptr) { 2116 return nullptr; 2117 } 2118 2119 // If we haven't visited fallthrough, return that. 2120 if (visited_fallthrough_ == false) { 2121 visited_fallthrough_ = true; 2122 2123 BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->fall_through); 2124 if (result != nullptr) { 2125 return result; 2126 } 2127 } 2128 2129 // If we haven't visited taken, return that. 2130 if (visited_taken_ == false) { 2131 visited_taken_ = true; 2132 2133 BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->taken); 2134 if (result != nullptr) { 2135 return result; 2136 } 2137 } 2138 2139 // We visited both taken and fallthrough. Now check if we have successors we need to visit. 2140 if (have_successors_ == true) { 2141 // Get information about next successor block. 2142 auto end = basic_block_->successor_blocks.cend(); 2143 while (successor_iter_ != end) { 2144 SuccessorBlockInfo* successor_block_info = *successor_iter_; 2145 ++successor_iter_; 2146 // If block was replaced by zero block, take next one. 2147 if (successor_block_info->block != NullBasicBlockId) { 2148 return mir_graph_->GetBasicBlock(successor_block_info->block); 2149 } 2150 } 2151 } 2152 2153 // We do not have anything. 2154 return nullptr; 2155} 2156 2157BasicBlock* BasicBlock::Copy(CompilationUnit* c_unit) { 2158 MIRGraph* mir_graph = c_unit->mir_graph.get(); 2159 return Copy(mir_graph); 2160} 2161 2162BasicBlock* BasicBlock::Copy(MIRGraph* mir_graph) { 2163 BasicBlock* result_bb = mir_graph->CreateNewBB(block_type); 2164 2165 // We don't do a memcpy style copy here because it would lead to a lot of things 2166 // to clean up. Let us do it by hand instead. 2167 // Copy in taken and fallthrough. 2168 result_bb->fall_through = fall_through; 2169 result_bb->taken = taken; 2170 2171 // Copy successor links if needed. 2172 ArenaAllocator* arena = mir_graph->GetArena(); 2173 2174 result_bb->successor_block_list_type = successor_block_list_type; 2175 if (result_bb->successor_block_list_type != kNotUsed) { 2176 result_bb->successor_blocks.reserve(successor_blocks.size()); 2177 for (SuccessorBlockInfo* sbi_old : successor_blocks) { 2178 SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>( 2179 arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor)); 2180 memcpy(sbi_new, sbi_old, sizeof(SuccessorBlockInfo)); 2181 result_bb->successor_blocks.push_back(sbi_new); 2182 } 2183 } 2184 2185 // Copy offset, method. 2186 result_bb->start_offset = start_offset; 2187 2188 // Now copy instructions. 2189 for (MIR* mir = first_mir_insn; mir != 0; mir = mir->next) { 2190 // Get a copy first. 2191 MIR* copy = mir->Copy(mir_graph); 2192 2193 // Append it. 2194 result_bb->AppendMIR(copy); 2195 } 2196 2197 return result_bb; 2198} 2199 2200MIR* MIR::Copy(MIRGraph* mir_graph) { 2201 MIR* res = mir_graph->NewMIR(); 2202 *res = *this; 2203 2204 // Remove links 2205 res->next = nullptr; 2206 res->bb = NullBasicBlockId; 2207 res->ssa_rep = nullptr; 2208 2209 return res; 2210} 2211 2212MIR* MIR::Copy(CompilationUnit* c_unit) { 2213 return Copy(c_unit->mir_graph.get()); 2214} 2215 2216uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) { 2217 // Default result. 2218 int res = 0; 2219 2220 // We are basically setting the iputs to their igets counterparts. 2221 switch (opcode) { 2222 case Instruction::IPUT: 2223 case Instruction::IPUT_OBJECT: 2224 case Instruction::IPUT_BOOLEAN: 2225 case Instruction::IPUT_BYTE: 2226 case Instruction::IPUT_CHAR: 2227 case Instruction::IPUT_SHORT: 2228 case Instruction::IPUT_QUICK: 2229 case Instruction::IPUT_OBJECT_QUICK: 2230 case Instruction::IPUT_BOOLEAN_QUICK: 2231 case Instruction::IPUT_BYTE_QUICK: 2232 case Instruction::IPUT_CHAR_QUICK: 2233 case Instruction::IPUT_SHORT_QUICK: 2234 case Instruction::APUT: 2235 case Instruction::APUT_OBJECT: 2236 case Instruction::APUT_BOOLEAN: 2237 case Instruction::APUT_BYTE: 2238 case Instruction::APUT_CHAR: 2239 case Instruction::APUT_SHORT: 2240 case Instruction::SPUT: 2241 case Instruction::SPUT_OBJECT: 2242 case Instruction::SPUT_BOOLEAN: 2243 case Instruction::SPUT_BYTE: 2244 case Instruction::SPUT_CHAR: 2245 case Instruction::SPUT_SHORT: 2246 // Skip the VR containing what to store. 2247 res = 1; 2248 break; 2249 case Instruction::IPUT_WIDE: 2250 case Instruction::IPUT_WIDE_QUICK: 2251 case Instruction::APUT_WIDE: 2252 case Instruction::SPUT_WIDE: 2253 // Skip the two VRs containing what to store. 2254 res = 2; 2255 break; 2256 default: 2257 // Do nothing in the general case. 2258 break; 2259 } 2260 2261 return res; 2262} 2263 2264/** 2265 * @brief Given a decoded instruction, it checks whether the instruction 2266 * sets a constant and if it does, more information is provided about the 2267 * constant being set. 2268 * @param ptr_value pointer to a 64-bit holder for the constant. 2269 * @param wide Updated by function whether a wide constant is being set by bytecode. 2270 * @return Returns false if the decoded instruction does not represent a constant bytecode. 2271 */ 2272bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const { 2273 bool sets_const = true; 2274 int64_t value = vB; 2275 2276 DCHECK(ptr_value != nullptr); 2277 DCHECK(wide != nullptr); 2278 2279 switch (opcode) { 2280 case Instruction::CONST_4: 2281 case Instruction::CONST_16: 2282 case Instruction::CONST: 2283 *wide = false; 2284 value <<= 32; // In order to get the sign extend. 2285 value >>= 32; 2286 break; 2287 case Instruction::CONST_HIGH16: 2288 *wide = false; 2289 value <<= 48; // In order to get the sign extend. 2290 value >>= 32; 2291 break; 2292 case Instruction::CONST_WIDE_16: 2293 case Instruction::CONST_WIDE_32: 2294 *wide = true; 2295 value <<= 32; // In order to get the sign extend. 2296 value >>= 32; 2297 break; 2298 case Instruction::CONST_WIDE: 2299 *wide = true; 2300 value = vB_wide; 2301 break; 2302 case Instruction::CONST_WIDE_HIGH16: 2303 *wide = true; 2304 value <<= 48; // In order to get the sign extend. 2305 break; 2306 default: 2307 sets_const = false; 2308 break; 2309 } 2310 2311 if (sets_const) { 2312 *ptr_value = value; 2313 } 2314 2315 return sets_const; 2316} 2317 2318void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) { 2319 // Reset flags for all MIRs in bb. 2320 for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) { 2321 mir->optimization_flags &= (~reset_flags); 2322 } 2323} 2324 2325void BasicBlock::Kill(MIRGraph* mir_graph) { 2326 for (BasicBlockId pred_id : predecessors) { 2327 BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id); 2328 DCHECK(pred_bb != nullptr); 2329 2330 // Sadly we have to go through the children by hand here. 2331 pred_bb->ReplaceChild(id, NullBasicBlockId); 2332 } 2333 predecessors.clear(); 2334 2335 // Mark as dead and hidden. 2336 block_type = kDead; 2337 hidden = true; 2338 2339 // Detach it from its MIRs so we don't generate code for them. Also detached MIRs 2340 // are updated to know that they no longer have a parent. 2341 for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) { 2342 mir->bb = NullBasicBlockId; 2343 } 2344 first_mir_insn = nullptr; 2345 last_mir_insn = nullptr; 2346 2347 data_flow_info = nullptr; 2348 2349 // Erase this bb from all children's predecessors and kill unreachable children. 2350 ChildBlockIterator iter(this, mir_graph); 2351 for (BasicBlock* succ_bb = iter.Next(); succ_bb != nullptr; succ_bb = iter.Next()) { 2352 succ_bb->ErasePredecessor(id); 2353 } 2354 2355 // Remove links to children. 2356 fall_through = NullBasicBlockId; 2357 taken = NullBasicBlockId; 2358 successor_block_list_type = kNotUsed; 2359 2360 if (kIsDebugBuild) { 2361 if (catch_entry) { 2362 DCHECK_EQ(mir_graph->catches_.count(start_offset), 1u); 2363 mir_graph->catches_.erase(start_offset); 2364 } 2365 } 2366} 2367 2368bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) { 2369 // In order to determine if the ssa reg is live out, we scan all the MIRs. We remember 2370 // the last SSA number of the same dalvik register. At the end, if it is different than ssa_reg, 2371 // then it is not live out of this BB. 2372 int dalvik_reg = c_unit->mir_graph->SRegToVReg(ssa_reg); 2373 2374 int last_ssa_reg = -1; 2375 2376 // Walk through the MIRs backwards. 2377 for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) { 2378 // Get ssa rep. 2379 SSARepresentation *ssa_rep = mir->ssa_rep; 2380 2381 // Go through the defines for this MIR. 2382 for (int i = 0; i < ssa_rep->num_defs; i++) { 2383 DCHECK(ssa_rep->defs != nullptr); 2384 2385 // Get the ssa reg. 2386 int def_ssa_reg = ssa_rep->defs[i]; 2387 2388 // Get dalvik reg. 2389 int def_dalvik_reg = c_unit->mir_graph->SRegToVReg(def_ssa_reg); 2390 2391 // Compare dalvik regs. 2392 if (dalvik_reg == def_dalvik_reg) { 2393 // We found a def of the register that we are being asked about. 2394 // Remember it. 2395 last_ssa_reg = def_ssa_reg; 2396 } 2397 } 2398 } 2399 2400 if (last_ssa_reg == -1) { 2401 // If we get to this point we couldn't find a define of register user asked about. 2402 // Let's assume the user knows what he's doing so we can be safe and say that if we 2403 // couldn't find a def, it is live out. 2404 return true; 2405 } 2406 2407 // If it is not -1, we found a match, is it ssa_reg? 2408 return (ssa_reg == last_ssa_reg); 2409} 2410 2411bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) { 2412 // We need to check taken, fall_through, and successor_blocks to replace. 2413 bool found = false; 2414 if (taken == old_bb) { 2415 taken = new_bb; 2416 found = true; 2417 } 2418 2419 if (fall_through == old_bb) { 2420 fall_through = new_bb; 2421 found = true; 2422 } 2423 2424 if (successor_block_list_type != kNotUsed) { 2425 for (SuccessorBlockInfo* successor_block_info : successor_blocks) { 2426 if (successor_block_info->block == old_bb) { 2427 successor_block_info->block = new_bb; 2428 found = true; 2429 } 2430 } 2431 } 2432 2433 return found; 2434} 2435 2436void BasicBlock::ErasePredecessor(BasicBlockId old_pred) { 2437 auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred); 2438 DCHECK(pos != predecessors.end()); 2439 // It's faster to move the back() to *pos than erase(pos). 2440 *pos = predecessors.back(); 2441 predecessors.pop_back(); 2442 size_t idx = std::distance(predecessors.begin(), pos); 2443 for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) { 2444 if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) { 2445 break; 2446 } 2447 DCHECK_EQ(mir->ssa_rep->num_uses - 1u, predecessors.size()); 2448 DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred); 2449 mir->meta.phi_incoming[idx] = mir->meta.phi_incoming[predecessors.size()]; 2450 mir->ssa_rep->uses[idx] = mir->ssa_rep->uses[predecessors.size()]; 2451 mir->ssa_rep->num_uses = predecessors.size(); 2452 } 2453} 2454 2455void BasicBlock::UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred) { 2456 DCHECK_NE(new_pred, NullBasicBlockId); 2457 auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred); 2458 DCHECK(pos != predecessors.end()); 2459 *pos = new_pred; 2460 size_t idx = std::distance(predecessors.begin(), pos); 2461 for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) { 2462 if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) { 2463 break; 2464 } 2465 DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred); 2466 mir->meta.phi_incoming[idx] = new_pred; 2467 } 2468} 2469 2470// Create a new basic block with block_id as num_blocks_ that is 2471// post-incremented. 2472BasicBlock* MIRGraph::CreateNewBB(BBType block_type) { 2473 BasicBlockId id = static_cast<BasicBlockId>(block_list_.size()); 2474 BasicBlock* res = NewMemBB(block_type, id); 2475 block_list_.push_back(res); 2476 return res; 2477} 2478 2479void MIRGraph::CalculateBasicBlockInformation(const PassManager* const post_opt_pass_manager) { 2480 /* Create the pass driver and launch it */ 2481 PassDriverMEPostOpt driver(post_opt_pass_manager, cu_); 2482 driver.Launch(); 2483} 2484 2485int MIR::DecodedInstruction::FlagsOf() const { 2486 // Calculate new index. 2487 int idx = static_cast<int>(opcode) - kNumPackedOpcodes; 2488 2489 // Check if it is an extended or not. 2490 if (idx < 0) { 2491 return Instruction::FlagsOf(opcode); 2492 } 2493 2494 // For extended, we use a switch. 2495 switch (static_cast<int>(opcode)) { 2496 case kMirOpPhi: 2497 return Instruction::kContinue; 2498 case kMirOpCopy: 2499 return Instruction::kContinue; 2500 case kMirOpFusedCmplFloat: 2501 return Instruction::kContinue | Instruction::kBranch; 2502 case kMirOpFusedCmpgFloat: 2503 return Instruction::kContinue | Instruction::kBranch; 2504 case kMirOpFusedCmplDouble: 2505 return Instruction::kContinue | Instruction::kBranch; 2506 case kMirOpFusedCmpgDouble: 2507 return Instruction::kContinue | Instruction::kBranch; 2508 case kMirOpFusedCmpLong: 2509 return Instruction::kContinue | Instruction::kBranch; 2510 case kMirOpNop: 2511 return Instruction::kContinue; 2512 case kMirOpNullCheck: 2513 return Instruction::kContinue | Instruction::kThrow; 2514 case kMirOpRangeCheck: 2515 return Instruction::kContinue | Instruction::kThrow; 2516 case kMirOpDivZeroCheck: 2517 return Instruction::kContinue | Instruction::kThrow; 2518 case kMirOpCheck: 2519 return Instruction::kContinue | Instruction::kThrow; 2520 case kMirOpSelect: 2521 return Instruction::kContinue; 2522 case kMirOpConstVector: 2523 return Instruction::kContinue; 2524 case kMirOpMoveVector: 2525 return Instruction::kContinue; 2526 case kMirOpPackedMultiply: 2527 return Instruction::kContinue; 2528 case kMirOpPackedAddition: 2529 return Instruction::kContinue; 2530 case kMirOpPackedSubtract: 2531 return Instruction::kContinue; 2532 case kMirOpPackedShiftLeft: 2533 return Instruction::kContinue; 2534 case kMirOpPackedSignedShiftRight: 2535 return Instruction::kContinue; 2536 case kMirOpPackedUnsignedShiftRight: 2537 return Instruction::kContinue; 2538 case kMirOpPackedAnd: 2539 return Instruction::kContinue; 2540 case kMirOpPackedOr: 2541 return Instruction::kContinue; 2542 case kMirOpPackedXor: 2543 return Instruction::kContinue; 2544 case kMirOpPackedAddReduce: 2545 return Instruction::kContinue; 2546 case kMirOpPackedReduce: 2547 return Instruction::kContinue; 2548 case kMirOpPackedSet: 2549 return Instruction::kContinue; 2550 case kMirOpReserveVectorRegisters: 2551 return Instruction::kContinue; 2552 case kMirOpReturnVectorRegisters: 2553 return Instruction::kContinue; 2554 case kMirOpMemBarrier: 2555 return Instruction::kContinue; 2556 case kMirOpPackedArrayGet: 2557 return Instruction::kContinue | Instruction::kThrow; 2558 case kMirOpPackedArrayPut: 2559 return Instruction::kContinue | Instruction::kThrow; 2560 case kMirOpMaddInt: 2561 case kMirOpMsubInt: 2562 case kMirOpMaddLong: 2563 case kMirOpMsubLong: 2564 return Instruction::kContinue; 2565 default: 2566 LOG(WARNING) << "ExtendedFlagsOf: Unhandled case: " << static_cast<int> (opcode); 2567 return 0; 2568 } 2569} 2570 2571const uint16_t* MIRGraph::GetInsns(int m_unit_index) const { 2572 return m_units_[m_unit_index]->GetCodeItem()->insns_; 2573} 2574 2575void MIRGraph::SetPuntToInterpreter(bool val) { 2576 punt_to_interpreter_ = val; 2577 if (val) { 2578 // Disable all subsequent optimizations. They may not be safe to run. (For example, 2579 // LVN/GVN assumes there are no conflicts found by the type inference pass.) 2580 cu_->disable_opt = ~static_cast<decltype(cu_->disable_opt)>(0); 2581 } 2582} 2583 2584} // namespace art 2585