mir_graph.cc revision 2469e60e6ff08c2a0b4cd1e209246c5d91027679

/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mir_graph.h"

#include <inttypes.h>
#include <queue>

#include "base/stl_util.h"
#include "compiler_internals.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "leb128.h"
#include "pass_driver_me_post_opt.h"

namespace art {

#define MAX_PATTERN_LEN 5

const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
  "Phi",
  "Copy",
  "FusedCmplFloat",
  "FusedCmpgFloat",
  "FusedCmplDouble",
  "FusedCmpgDouble",
  "FusedCmpLong",
  "Nop",
  "OpNullCheck",
  "OpRangeCheck",
  "OpDivZeroCheck",
  "Check1",
  "Check2",
  "Select",
  "ConstVector",
  "MoveVector",
  "PackedMultiply",
  "PackedAddition",
  "PackedSubtract",
  "PackedShiftLeft",
  "PackedSignedShiftRight",
  "PackedUnsignedShiftRight",
  "PackedAnd",
  "PackedOr",
  "PackedXor",
  "PackedAddReduce",
  "PackedReduce",
  "PackedSet",
};

MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
    : reg_location_(NULL),
      cu_(cu),
      ssa_base_vregs_(NULL),
      ssa_subscripts_(NULL),
      vreg_to_ssa_map_(NULL),
      ssa_last_defs_(NULL),
      is_constant_v_(NULL),
      constant_values_(NULL),
      use_counts_(arena, 256, kGrowableArrayMisc),
      raw_use_counts_(arena, 256, kGrowableArrayMisc),
      num_reachable_blocks_(0),
      max_num_reachable_blocks_(0),
      dfs_order_(NULL),
      dfs_post_order_(NULL),
      dom_post_order_traversal_(NULL),
      topological_order_(nullptr),
      i_dom_list_(NULL),
      def_block_matrix_(NULL),
      temp_scoped_alloc_(),
      temp_insn_data_(nullptr),
      temp_bit_vector_size_(0u),
      temp_bit_vector_(nullptr),
      block_list_(arena, 100, kGrowableArrayBlockList),
      try_block_addr_(NULL),
      entry_block_(NULL),
      exit_block_(NULL),
      num_blocks_(0),
      current_code_item_(NULL),
      dex_pc_to_block_map_(arena, 0, kGrowableArrayMisc),
      current_method_(kInvalidEntry),
      current_offset_(kInvalidEntry),
      def_count_(0),
      opcode_count_(NULL),
      num_ssa_regs_(0),
      method_sreg_(0),
      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
      checkstats_(NULL),
      arena_(arena),
      backward_branches_(0),
      forward_branches_(0),
      compiler_temps_(arena, 6, kGrowableArrayMisc),
      num_non_special_compiler_temps_(0),
      max_available_non_special_compiler_temps_(0),
      punt_to_interpreter_(false),
      merged_df_flags_(0u),
      ifield_lowering_infos_(arena, 0u),
      sfield_lowering_infos_(arena, 0u),
      method_lowering_infos_(arena, 0u) {
  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
  max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
      - std::abs(static_cast<int>(kVRegTempBaseReg));
}

MIRGraph::~MIRGraph() {
  STLDeleteElements(&m_units_);
}

/*
 * Parse an instruction and return its length in code units.
 */
int MIRGraph::ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction) {
  const Instruction* inst = Instruction::At(code_ptr);
  decoded_instruction->opcode = inst->Opcode();
  decoded_instruction->vA = inst->HasVRegA() ? inst->VRegA() : 0;
  decoded_instruction->vB = inst->HasVRegB() ? inst->VRegB() : 0;
  decoded_instruction->vB_wide = inst->HasWideVRegB() ? inst->WideVRegB() : 0;
  decoded_instruction->vC = inst->HasVRegC() ? inst->VRegC() : 0;
  if (inst->HasVarArgs()) {
    inst->GetVarArgs(decoded_instruction->arg);
  }
  return inst->SizeInCodeUnits();
}

/* Split an existing block from the specified code offset into two */
BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
  DCHECK_GT(code_offset, orig_block->start_offset);
  MIR* insn = orig_block->first_mir_insn;
  MIR* prev = NULL;
  while (insn) {
    if (insn->offset == code_offset) break;
    prev = insn;
    insn = insn->next;
  }
  if (insn == NULL) {
    LOG(FATAL) << "Break split failed";
  }
  BasicBlock* bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(bottom_block);

  bottom_block->start_offset = code_offset;
  bottom_block->first_mir_insn = insn;
  bottom_block->last_mir_insn = orig_block->last_mir_insn;

  /* If this block was terminated by a return, the flag needs to go with the bottom block */
  bottom_block->terminated_by_return = orig_block->terminated_by_return;
  orig_block->terminated_by_return = false;

  /* Handle the taken path */
  bottom_block->taken = orig_block->taken;
  if (bottom_block->taken != NullBasicBlockId) {
    orig_block->taken = NullBasicBlockId;
    BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
    bb_taken->predecessors->Delete(orig_block->id);
    bb_taken->predecessors->Insert(bottom_block->id);
  }

  /* Handle the fallthrough path */
  bottom_block->fall_through = orig_block->fall_through;
  orig_block->fall_through = bottom_block->id;
  bottom_block->predecessors->Insert(orig_block->id);
  if (bottom_block->fall_through != NullBasicBlockId) {
    BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
    bb_fall_through->predecessors->Delete(orig_block->id);
    bb_fall_through->predecessors->Insert(bottom_block->id);
  }

  /* Handle the successor list */
  if (orig_block->successor_block_list_type != kNotUsed) {
    bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
    bottom_block->successor_blocks = orig_block->successor_blocks;
    orig_block->successor_block_list_type = kNotUsed;
    orig_block->successor_blocks = NULL;
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
    while (true) {
      SuccessorBlockInfo* successor_block_info = iterator.Next();
      if (successor_block_info == NULL) break;
      BasicBlock* bb = GetBasicBlock(successor_block_info->block);
      bb->predecessors->Delete(orig_block->id);
      bb->predecessors->Insert(bottom_block->id);
    }
  }

  orig_block->last_mir_insn = prev;
  prev->next = nullptr;

  /*
   * Update the immediate predecessor block pointer so that outgoing edges
   * can be applied to the proper block.
   */
  if (immed_pred_block_p) {
    DCHECK_EQ(*immed_pred_block_p, orig_block);
    *immed_pred_block_p = bottom_block;
  }

  // Associate dex instructions in the bottom block with the new container.
  DCHECK(insn != nullptr);
  DCHECK(insn != orig_block->first_mir_insn);
  DCHECK(insn == bottom_block->first_mir_insn);
  DCHECK_EQ(insn->offset, bottom_block->start_offset);
  DCHECK(static_cast<int>(insn->dalvikInsn.opcode) == kMirOpCheck ||
         !IsPseudoMirOp(insn->dalvikInsn.opcode));
  DCHECK_EQ(dex_pc_to_block_map_.Get(insn->offset), orig_block->id);
  MIR* p = insn;
  dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
  while (p != bottom_block->last_mir_insn) {
    p = p->next;
    DCHECK(p != nullptr);
    p->bb = bottom_block->id;
    int opcode = p->dalvikInsn.opcode;
    /*
     * Some messiness here to ensure that we only enter real opcodes and only the
     * first half of a potentially throwing instruction that has been split into
     * CHECK and work portions. Since the 2nd half of a split operation is always
     * the first in a BasicBlock, we can't hit it here.
     */
    if ((opcode == kMirOpCheck) || !IsPseudoMirOp(opcode)) {
      DCHECK_EQ(dex_pc_to_block_map_.Get(p->offset), orig_block->id);
      dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
    }
  }

  return bottom_block;
}

/*
 * Given a code offset, find out the block that starts with it. If the offset
 * is in the middle of an existing block, split it into two. If immed_pred_block_p
 * is non-null and *immed_pred_block_p is the block being split, update
 * *immed_pred_block_p to point to the bottom block so that outgoing edges can
 * be set up properly (by the caller).
 * Uses a map for fast lookup of the typical cases.
 */
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
                                BasicBlock** immed_pred_block_p) {
  if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
    return NULL;
  }

  int block_id = dex_pc_to_block_map_.Get(code_offset);
  BasicBlock* bb = (block_id == 0) ? NULL : block_list_.Get(block_id);

  if ((bb != NULL) && (bb->start_offset == code_offset)) {
    // Does this containing block start with the desired instruction?
    return bb;
  }

  // No direct hit.
  if (!create) {
    return NULL;
  }

  if (bb != NULL) {
    // The target exists somewhere in an existing block.
    return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : NULL);
  }

  // Create a new block.
  bb = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(bb);
  bb->start_offset = code_offset;
  dex_pc_to_block_map_.Put(bb->start_offset, bb->id);
  return bb;
}
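
// Illustrative sketch (not part of the original source): how the parser uses
// FindBlock() to materialize a branch target, mirroring ProcessCanBranch() below.
// The target offset and 'cur_block' here are hypothetical.
//
//   BasicBlock* cur_block = ...;  // block currently being parsed
//   // A target that lands mid-block triggers a SplitBlock(); if cur_block itself
//   // is the block being split, FindBlock() updates the pointer so the caller
//   // attaches the new edges to the bottom half.
//   BasicBlock* target = FindBlock(0x20, /* split */ true, /* create */ true,
//                                  /* immed_pred_block_p */ &cur_block);
//   cur_block->taken = target->id;
//   target->predecessors->Insert(cur_block->id);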

/* Identify code range in try blocks and set up the empty catch blocks */
void MIRGraph::ProcessTryCatchBlocks() {
  int tries_size = current_code_item_->tries_size_;
  DexOffset offset;

  if (tries_size == 0) {
    return;
  }

  for (int i = 0; i < tries_size; i++) {
    const DexFile::TryItem* pTry =
        DexFile::GetTryItems(*current_code_item_, i);
    DexOffset start_offset = pTry->start_addr_;
    DexOffset end_offset = start_offset + pTry->insn_count_;
    for (offset = start_offset; offset < end_offset; offset++) {
      try_block_addr_->SetBit(offset);
    }
  }

  // Iterate over each of the handlers to enqueue the empty Catch blocks.
  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
  for (uint32_t idx = 0; idx < handlers_size; idx++) {
    CatchHandlerIterator iterator(handlers_ptr);
    for (; iterator.HasNext(); iterator.Next()) {
      uint32_t address = iterator.GetHandlerAddress();
      FindBlock(address, false /* split */, true /* create */,
                /* immed_pred_block_p */ NULL);
    }
    handlers_ptr = iterator.EndDataPointer();
  }
}

/* Process instructions with the kBranch flag */
BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags, const uint16_t* code_ptr,
                                       const uint16_t* code_end) {
  DexOffset target = cur_offset;
  switch (insn->dalvikInsn.opcode) {
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      target += insn->dalvikInsn.vA;
      break;
    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vC;
      break;
    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vB;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
  }
  CountBranch(target);
  BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                      /* immed_pred_block_p */ &cur_block);
  cur_block->taken = taken_block->id;
  taken_block->predecessors->Insert(cur_block->id);

  /* Always terminate the current block for conditional branches */
  if (flags & Instruction::kContinue) {
    BasicBlock* fallthrough_block = FindBlock(cur_offset + width,
                                              /*
                                               * If the method is processed
                                               * in sequential order from the
                                               * beginning, we don't need to
                                               * specify split for continue
                                               * blocks. However, this
                                               * routine can be called by
                                               * compileLoop, which starts
                                               * parsing the method from an
                                               * arbitrary address in the
                                               * method body.
                                               */
                                              true,
                                              /* create */
                                              true,
                                              /* immed_pred_block_p */
                                              &cur_block);
    cur_block->fall_through = fallthrough_block->id;
    fallthrough_block->predecessors->Insert(cur_block->id);
  } else if (code_ptr < code_end) {
    FindBlock(cur_offset + width, /* split */ false, /* create */ true,
              /* immed_pred_block_p */ NULL);
  }
  return cur_block;
}
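
// Worked example (illustrative offsets): a "GOTO +0x05" whose MIR sits at dex
// offset 0x10 carries the signed relative offset in vA, so the computed target
// is 0x10 + 0x05 = 0x15. Two-operand conditional branches such as IF-EQ keep
// the offset in vC (one-operand forms such as IF-EQZ keep it in vB) and also
// mark the containing block with conditional_branch = true.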

/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags) {
  const uint16_t* switch_data =
      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
  int size;
  const int* key_table;
  const int* target_table;
  int first_key;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kPackedSwitchSignature));
    size = switch_data[1];
    first_key = switch_data[2] | (switch_data[3] << 16);
    target_table = reinterpret_cast<const int*>(&switch_data[4]);
    key_table = NULL;  // Make the compiler happy.
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  } else {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kSparseSwitchSignature));
    size = switch_data[1];
    key_table = reinterpret_cast<const int*>(&switch_data[2]);
    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
    first_key = 0;  // Make the compiler happy.
  }

  if (cur_block->successor_block_list_type != kNotUsed) {
    LOG(FATAL) << "Successor block list already in use: "
               << static_cast<int>(cur_block->successor_block_list_type);
  }
  cur_block->successor_block_list_type =
      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? kPackedSwitch : kSparseSwitch;
  cur_block->successor_blocks =
      new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);

  for (int i = 0; i < size; i++) {
    BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
    SuccessorBlockInfo* successor_block_info =
        static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                       kArenaAllocSuccessor));
    successor_block_info->block = case_block->id;
    successor_block_info->key =
        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
        first_key + i : key_table[i];
    cur_block->successor_blocks->Insert(successor_block_info);
    case_block->predecessors->Insert(cur_block->id);
  }

  /* Fall-through case */
  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
                                            /* create */ true, /* immed_pred_block_p */ NULL);
  cur_block->fall_through = fallthrough_block->id;
  fallthrough_block->predecessors->Insert(cur_block->id);
  return cur_block;
}
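
// Worked example (illustrative payload): a packed-switch payload of
//
//   ushort ident     = 0x0100
//   ushort size      = 3
//   int    first_key = 10
//   int    targets[] = {0x05, 0x0a, 0x0f}  // relative to the switch opcode
//
// covers case keys 10, 11 and 12, occupies (4 + 3 * 2) = 10 code units, and
// yields three successor edges whose keys are first_key + i. A sparse payload
// with three entries would instead occupy (2 + 3 * 4) = 14 code units and read
// each key from its sorted keys[] table.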

/* Process instructions with the kThrow flag */
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                      int width, int flags, ArenaBitVector* try_block_addr,
                                      const uint16_t* code_ptr, const uint16_t* code_end) {
  bool in_try_block = try_block_addr->IsBitSet(cur_offset);
  bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
  bool build_all_edges =
      (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;

  /* In try block */
  if (in_try_block) {
    CatchHandlerIterator iterator(*current_code_item_, cur_offset);

    if (cur_block->successor_block_list_type != kNotUsed) {
      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      LOG(FATAL) << "Successor block list already in use: "
                 << static_cast<int>(cur_block->successor_block_list_type);
    }

    cur_block->successor_block_list_type = kCatch;
    cur_block->successor_blocks =
        new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);

    for (; iterator.HasNext(); iterator.Next()) {
      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
                                          false /* create */, NULL /* immed_pred_block_p */);
      catch_block->catch_entry = true;
      if (kIsDebugBuild) {
        catches_.insert(catch_block->start_offset);
      }
      SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
          (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
      successor_block_info->block = catch_block->id;
      successor_block_info->key = iterator.GetHandlerTypeIndex();
      cur_block->successor_blocks->Insert(successor_block_info);
      catch_block->predecessors->Insert(cur_block->id);
    }
  } else if (build_all_edges) {
    BasicBlock* eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
    cur_block->taken = eh_block->id;
    block_list_.Insert(eh_block);
    eh_block->start_offset = cur_offset;
    eh_block->predecessors->Insert(cur_block->id);
  }

  if (is_throw) {
    cur_block->explicit_throw = true;
    if (code_ptr < code_end) {
      // Force creation of new block following THROW via side-effect.
      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                /* immed_pred_block_p */ NULL);
    }
    if (!in_try_block) {
      // Don't split a THROW that can't rethrow - we're done.
      return cur_block;
    }
  }

  if (!build_all_edges) {
    /*
     * Even though there is an exception edge here, control cannot return to this
     * method. Thus, for the purposes of dataflow analysis and optimization, we can
     * ignore the edge. Doing this reduces compile time, and increases the scope
     * of the basic-block level optimization pass.
     */
    return cur_block;
  }

  /*
   * Split the potentially-throwing instruction into two parts.
   * The first half will be a pseudo-op that captures the exception
   * edges and terminates the basic block. It always falls through.
   * Then, create a new basic block that begins with the throwing instruction
   * (minus exceptions). Note: this new basic block must NOT be entered into
   * the block_map. If the potentially-throwing instruction is the target of a
   * future branch, we need to find the check pseudo half. The new
   * basic block containing the work portion of the instruction should
   * only be entered via fallthrough from the block containing the
   * pseudo exception edge MIR. Note also that this new block is
   * not automatically terminated after the work portion, and may
   * contain following instructions.
   *
   * Note also that the dex_pc_to_block_map_ entry for the potentially
   * throwing instruction will refer to the original basic block.
   */
  BasicBlock* new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(new_block);
  new_block->start_offset = insn->offset;
  cur_block->fall_through = new_block->id;
  new_block->predecessors->Insert(cur_block->id);
  MIR* new_insn = NewMIR();
  *new_insn = *insn;
  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
  // Associate the two halves.
  insn->meta.throw_insn = new_insn;
  new_block->AppendMIR(new_insn);
  return new_block;
}
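
// Illustrative sketch (offsets assumed): for a potentially throwing AGET at dex
// pc 0x08 inside a try block, the split performed above produces
//
//   block A: ... kMirOpCheck(AGET)   <- owns the exception edges, falls through
//   block B: AGET ...                <- the work half, entered only from block A
//
// dex_pc_to_block_map_[0x08] keeps pointing at block A, so a later branch to
// 0x08 finds the check half rather than the work half.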

/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                            InvokeType invoke_type, uint16_t class_def_idx,
                            uint32_t method_idx, jobject class_loader, const DexFile& dex_file) {
  current_code_item_ = code_item;
  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
  current_method_ = m_units_.size();
  current_offset_ = 0;
  // TODO: will need to snapshot stack image and use that as the mir context identification.
  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
                     cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
  const uint16_t* code_ptr = current_code_item_->insns_;
  const uint16_t* code_end =
      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;

  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
  // TUNING: use better estimate of basic blocks for following resize.
  block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
  dex_pc_to_block_map_.SetSize(dex_pc_to_block_map_.Size() + current_code_item_->insns_size_in_code_units_);

  // TODO: replace with explicit resize routine. Using automatic extension side effect for now.
  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);

  // If this is the first method, set up default entry and exit blocks.
  if (current_method_ == 0) {
    DCHECK(entry_block_ == NULL);
    DCHECK(exit_block_ == NULL);
    DCHECK_EQ(num_blocks_, 0);
    // Use id 0 to represent a null block.
    BasicBlock* null_block = NewMemBB(kNullBlock, num_blocks_++);
    DCHECK_EQ(null_block->id, NullBasicBlockId);
    null_block->hidden = true;
    block_list_.Insert(null_block);
    entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
    block_list_.Insert(entry_block_);
    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
    block_list_.Insert(exit_block_);
    // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
    cu_->dex_file = &dex_file;
    cu_->class_def_idx = class_def_idx;
    cu_->method_idx = method_idx;
    cu_->access_flags = access_flags;
    cu_->invoke_type = invoke_type;
    cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
    cu_->num_ins = current_code_item_->ins_size_;
    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
    cu_->num_outs = current_code_item_->outs_size_;
    cu_->num_dalvik_registers = current_code_item_->registers_size_;
    cu_->insns = current_code_item_->insns_;
    cu_->code_item = current_code_item_;
  } else {
    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
    /*
     * Will need to manage storage for ins & outs, push previous state and update
     * insert point.
     */
  }

  /* Current block to record parsed instructions */
  BasicBlock* cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  DCHECK_EQ(current_offset_, 0U);
  cur_block->start_offset = current_offset_;
  block_list_.Insert(cur_block);
  // TODO: for inlining support, insert at the insert point rather than entry block.
  entry_block_->fall_through = cur_block->id;
  cur_block->predecessors->Insert(entry_block_->id);

  /* Identify code range in try blocks and set up the empty catch blocks */
  ProcessTryCatchBlocks();

  uint64_t merged_df_flags = 0u;

  /* Parse all instructions and put them into containing basic blocks */
  while (code_ptr < code_end) {
    MIR* insn = NewMIR();
    insn->offset = current_offset_;
    insn->m_unit_index = current_method_;
    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
    Instruction::Code opcode = insn->dalvikInsn.opcode;
    if (opcode_count_ != NULL) {
      opcode_count_[static_cast<int>(opcode)]++;
    }

    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
    int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);

    uint64_t df_flags = GetDataFlowAttributes(insn);
    merged_df_flags |= df_flags;

    if (df_flags & DF_HAS_DEFS) {
      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
    }

    if (df_flags & DF_LVN) {
      cur_block->use_lvn = true;  // Run local value numbering on this basic block.
    }

    // Check for inline data block signatures.
    if (opcode == Instruction::NOP) {
      // A simple NOP will have a width of 1 at this point, embedded data NOP > 1.
      if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
        // Could be an aligning nop. If an embedded data NOP follows, treat pair as single unit.
        uint16_t following_raw_instruction = code_ptr[1];
        if ((following_raw_instruction == Instruction::kSparseSwitchSignature) ||
            (following_raw_instruction == Instruction::kPackedSwitchSignature) ||
            (following_raw_instruction == Instruction::kArrayDataSignature)) {
          width += Instruction::At(code_ptr + 1)->SizeInCodeUnits();
        }
      }
      if (width == 1) {
        // It is a simple nop - treat normally.
        cur_block->AppendMIR(insn);
      } else {
        DCHECK(cur_block->fall_through == NullBasicBlockId);
        DCHECK(cur_block->taken == NullBasicBlockId);
        // Unreachable instruction, mark for no continuation.
        flags &= ~Instruction::kContinue;
      }
    } else {
      cur_block->AppendMIR(insn);
    }

    // Associate the starting dex_pc for this opcode with its containing basic block.
    dex_pc_to_block_map_.Put(insn->offset, cur_block->id);

    code_ptr += width;

    if (flags & Instruction::kBranch) {
      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
                                   width, flags, code_ptr, code_end);
    } else if (flags & Instruction::kReturn) {
      cur_block->terminated_by_return = true;
      cur_block->fall_through = exit_block_->id;
      exit_block_->predecessors->Insert(cur_block->id);
      /*
       * Terminate the current block if there are instructions
       * afterwards.
       */
      if (code_ptr < code_end) {
        /*
         * Create a fallthrough block for real instructions
         * (incl. NOP).
         */
        FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
                  /* immed_pred_block_p */ NULL);
      }
    } else if (flags & Instruction::kThrow) {
      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
                                  code_ptr, code_end);
    } else if (flags & Instruction::kSwitch) {
      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
    }
    if (verify_flags & Instruction::kVerifyVarArgRange) {
      /*
       * The Quick backend's runtime model includes a gap between a method's
       * argument ("in") vregs and the rest of its vregs. Handling a range instruction
       * which spans the gap is somewhat complicated, and should not happen
       * in normal usage of dx. Punt to the interpreter.
       */
      int first_reg_in_range = insn->dalvikInsn.vC;
      int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
      if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
        punt_to_interpreter_ = true;
      }
    }
    current_offset_ += width;
    BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */ false,
                                       /* immed_pred_block_p */ NULL);
    if (next_block) {
      /*
       * The next instruction could be the target of a previously parsed
       * forward branch so a block is already created. If the current
       * instruction is not an unconditional branch, connect them through
       * the fall-through link.
       */
      DCHECK(cur_block->fall_through == NullBasicBlockId ||
             GetBasicBlock(cur_block->fall_through) == next_block ||
             GetBasicBlock(cur_block->fall_through) == exit_block_);

      if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
        cur_block->fall_through = next_block->id;
        next_block->predecessors->Insert(cur_block->id);
      }
      cur_block = next_block;
    }
  }
  merged_df_flags_ = merged_df_flags;

  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/1_post_parse_cfg/", true);
  }

  if (cu_->verbose) {
    DumpMIRGraph();
  }
}
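
// Worked example (illustrative register layout): in a method with VRs v0..v13
// where the last four (v10..v13) are "in" registers, an
// invoke-virtual/range {v8 .. v11} decodes to vC = 8 and vA = 4, giving
// first_reg_in_range = 8 (a non-in VR) and last_reg_in_range = 11 (an in VR).
// Since the range straddles the gap in the Quick runtime model,
// punt_to_interpreter_ is set.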

void MIRGraph::ShowOpcodeStats() {
  DCHECK(opcode_count_ != NULL);
  LOG(INFO) << "Opcode Count";
  for (int i = 0; i < kNumPackedOpcodes; i++) {
    if (opcode_count_[i] != 0) {
      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
                << " " << opcode_count_[i];
    }
  }
}

uint64_t MIRGraph::GetDataFlowAttributes(Instruction::Code opcode) {
  DCHECK_LT(static_cast<size_t>(opcode),
            sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0]));
  return oat_data_flow_attributes_[opcode];
}

uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) {
  DCHECK(mir != nullptr);
  Instruction::Code opcode = mir->dalvikInsn.opcode;
  return GetDataFlowAttributes(opcode);
}

// TODO: use a configurable base prefix, and adjust callers to supply pass name.
/* Dump the CFG into a DOT graph */
void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix) {
  FILE* file;
  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
  ReplaceSpecialChars(fname);
  fname = StringPrintf("%s%s%x%s.dot", dir_prefix, fname.c_str(),
                       GetBasicBlock(GetEntryBlock()->fall_through)->start_offset,
                       suffix == nullptr ? "" : suffix);
  file = fopen(fname.c_str(), "w");
  if (file == NULL) {
    return;
  }
  fprintf(file, "digraph G {\n");

  fprintf(file, "  rankdir=TB\n");

  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
  int idx;

  for (idx = 0; idx < num_blocks; idx++) {
    int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
    BasicBlock* bb = GetBasicBlock(block_idx);
    if (bb == NULL) continue;
    if (bb->block_type == kDead) continue;
    if (bb->block_type == kEntryBlock) {
      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kExitBlock) {
      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kDalvikByteCode) {
      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
              bb->start_offset, bb->id);
      const MIR* mir;
      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
              bb->first_mir_insn ? " | " : " ");
      for (mir = bb->first_mir_insn; mir; mir = mir->next) {
        int opcode = mir->dalvikInsn.opcode;
        if (opcode > kMirOpSelect && opcode < kMirOpLast) {
          if (opcode == kMirOpConstVector) {
            fprintf(file, "    {%04x %s %d %d %d %d %d %d\\l}%s\\\n", mir->offset,
                    extended_mir_op_names_[kMirOpConstVector - kMirOpFirst],
                    mir->dalvikInsn.vA,
                    mir->dalvikInsn.vB,
                    mir->dalvikInsn.arg[0],
                    mir->dalvikInsn.arg[1],
                    mir->dalvikInsn.arg[2],
                    mir->dalvikInsn.arg[3],
                    mir->next ? " | " : " ");
          } else {
            fprintf(file, "    {%04x %s %d %d %d\\l}%s\\\n", mir->offset,
                    extended_mir_op_names_[opcode - kMirOpFirst],
                    mir->dalvikInsn.vA,
                    mir->dalvikInsn.vB,
                    mir->dalvikInsn.vC,
                    mir->next ? " | " : " ");
          }
        } else {
          fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
                  mir->ssa_rep ? GetDalvikDisassembly(mir) :
                      (opcode < kMirOpFirst) ?
                          Instruction::Name(mir->dalvikInsn.opcode) :
                          extended_mir_op_names_[opcode - kMirOpFirst],
                  (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                  (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                  mir->next ? " | " : " ");
        }
      }
      fprintf(file, "  }\"];\n\n");
    } else if (bb->block_type == kExceptionHandling) {
      char block_name[BLOCK_NAME_LEN];

      GetBlockName(bb, block_name);
      fprintf(file, "  %s [shape=invhouse];\n", block_name);
    }

    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];

    if (bb->taken != NullBasicBlockId) {
      GetBlockName(bb, block_name1);
      GetBlockName(GetBasicBlock(bb->taken), block_name2);
      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
              block_name1, block_name2);
    }
    if (bb->fall_through != NullBasicBlockId) {
      GetBlockName(bb, block_name1);
      GetBlockName(GetBasicBlock(bb->fall_through), block_name2);
      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
    }

    if (bb->successor_block_list_type != kNotUsed) {
      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
              bb->start_offset, bb->id,
              (bb->successor_block_list_type == kCatch) ? "Mrecord" : "record");
      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
      SuccessorBlockInfo* successor_block_info = iterator.Next();

      int succ_id = 0;
      while (true) {
        if (successor_block_info == NULL) break;

        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
        SuccessorBlockInfo* next_successor_block_info = iterator.Next();

        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                succ_id++,
                successor_block_info->key,
                dest_block->start_offset,
                (next_successor_block_info != NULL) ? " | " : " ");

        successor_block_info = next_successor_block_info;
      }
      fprintf(file, "  }\"];\n\n");

      GetBlockName(bb, block_name1);
      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
              block_name1, bb->start_offset, bb->id);

      // Link the successor pseudo-block with all of its potential targets.
      GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);

      succ_id = 0;
      while (true) {
        SuccessorBlockInfo* successor_block_info = iter.Next();
        if (successor_block_info == NULL) break;

        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);

        GetBlockName(dest_block, block_name2);
        fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
                bb->id, succ_id++, block_name2);
      }
    }
    fprintf(file, "\n");

    if (cu_->verbose) {
      /* Display the dominator tree */
      GetBlockName(bb, block_name1);
      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
              block_name1, block_name1);
      if (bb->i_dom) {
        GetBlockName(GetBasicBlock(bb->i_dom), block_name2);
        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
      }
    }
  }
  fprintf(file, "}\n");
  fclose(file);
}
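
// Usage note (not in the original source): each DumpCFG() call writes one
// method's CFG as a Graphviz file under dir_prefix, which can be rendered
// off-device with the stock Graphviz tool, e.g.:
//
//   dot -Tpng some_method.dot -o some_method.png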

/* Insert a MIR instruction at the end of a basic block. */
void BasicBlock::AppendMIR(MIR* mir) {
  // Insert it after the last MIR.
  InsertMIRListAfter(last_mir_insn, mir, mir);
}

void BasicBlock::AppendMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  // Insert the list after the last MIR.
  InsertMIRListAfter(last_mir_insn, first_list_mir, last_list_mir);
}

void BasicBlock::AppendMIRList(const std::vector<MIR*>& insns) {
  for (std::vector<MIR*>::const_iterator it = insns.begin(); it != insns.end(); it++) {
    MIR* new_mir = *it;

    // Append each MIR in order.
    InsertMIRListAfter(last_mir_insn, new_mir, new_mir);
  }
}

/* Insert a MIR instruction after the specified MIR. */
void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) {
  InsertMIRListAfter(current_mir, new_mir, new_mir);
}

void BasicBlock::InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir) {
  // If no MIR, we are done.
  if (first_list_mir == nullptr || last_list_mir == nullptr) {
    return;
  }

  // If insert_after is null, assume BB is empty.
  if (insert_after == nullptr) {
    first_mir_insn = first_list_mir;
    last_mir_insn = last_list_mir;
    last_list_mir->next = nullptr;
  } else {
    MIR* after_list = insert_after->next;
    insert_after->next = first_list_mir;
    last_list_mir->next = after_list;
    if (after_list == nullptr) {
      last_mir_insn = last_list_mir;
    }
  }

  // Set this BB to be the basic block of the MIRs.
  MIR* last = last_list_mir->next;
  for (MIR* mir = first_list_mir; mir != last; mir = mir->next) {
    mir->bb = id;
  }
}

/* Insert a MIR instruction at the head of a basic block. */
void BasicBlock::PrependMIR(MIR* mir) {
  InsertMIRListBefore(first_mir_insn, mir, mir);
}

void BasicBlock::PrependMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  // Insert the list before the first MIR.
  InsertMIRListBefore(first_mir_insn, first_list_mir, last_list_mir);
}

void BasicBlock::PrependMIRList(const std::vector<MIR*>& to_add) {
  for (std::vector<MIR*>::const_iterator it = to_add.begin(); it != to_add.end(); it++) {
    MIR* mir = *it;

    InsertMIRListBefore(first_mir_insn, mir, mir);
  }
}

/* Insert a MIR instruction before the specified MIR. */
void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) {
  // Insert as a single element list.
  return InsertMIRListBefore(current_mir, new_mir, new_mir);
}

MIR* BasicBlock::FindPreviousMIR(MIR* mir) {
  MIR* current = first_mir_insn;

  while (current != nullptr) {
    MIR* next = current->next;

    if (next == mir) {
      return current;
    }

    current = next;
  }

  return nullptr;
}

void BasicBlock::InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir) {
  // If no MIR, we are done.
  if (first_list_mir == nullptr || last_list_mir == nullptr) {
    return;
  }

  // If insert_before is null, assume BB is empty.
  if (insert_before == nullptr) {
    first_mir_insn = first_list_mir;
    last_mir_insn = last_list_mir;
    last_list_mir->next = nullptr;
  } else {
    if (first_mir_insn == insert_before) {
      last_list_mir->next = first_mir_insn;
      first_mir_insn = first_list_mir;
    } else {
      // Find the preceding MIR.
      MIR* before_list = FindPreviousMIR(insert_before);
      DCHECK(before_list != nullptr);
      before_list->next = first_list_mir;
      last_list_mir->next = insert_before;
    }
  }

  // Set this BB to be the basic block of the MIRs.
  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
    mir->bb = id;
  }
}

bool BasicBlock::RemoveMIR(MIR* mir) {
  // Remove as a single element list.
  return RemoveMIRList(mir, mir);
}

bool BasicBlock::RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  if (first_list_mir == nullptr) {
    return false;
  }

  // Try to find the MIR.
  MIR* before_list = nullptr;
  MIR* after_list = nullptr;

  // If we are removing from the beginning of the MIR list.
  if (first_mir_insn == first_list_mir) {
    before_list = nullptr;
  } else {
    before_list = FindPreviousMIR(first_list_mir);
    if (before_list == nullptr) {
      // We did not find the mir.
      return false;
    }
  }

  after_list = last_list_mir->next;

  // Remove the BB information from every MIR in the list, including the last one.
  for (MIR* mir = first_list_mir; mir != after_list; mir = mir->next) {
    mir->bb = NullBasicBlockId;
  }

  // If there is nothing before the list, after_list becomes the first_mir;
  // otherwise unlink the removed list from its predecessor.
  if (before_list == nullptr) {
    first_mir_insn = after_list;
  } else {
    before_list->next = after_list;
  }

  // If there is nothing after the list, before_list is the last_mir.
  if (after_list == nullptr) {
    last_mir_insn = before_list;
  }

  return true;
}

MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
  MIR* next_mir = nullptr;

  if (current != nullptr) {
    next_mir = current->next;
  }

  if (next_mir == nullptr) {
    // Only look for next MIR that follows unconditionally.
    if ((taken == NullBasicBlockId) && (fall_through != NullBasicBlockId)) {
      next_mir = mir_graph->GetBasicBlock(fall_through)->first_mir_insn;
    }
  }

  return next_mir;
}

char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
  MIR::DecodedInstruction insn = mir->dalvikInsn;
  std::string str;
  int flags = 0;
  int opcode = insn.opcode;
  char* ret;
  bool nop = false;
  SSARepresentation* ssa_rep = mir->ssa_rep;
  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format.
  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;

  // Handle special cases.
  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
    str.append(": ");
    // Recover the original Dex instruction.
    insn = mir->meta.throw_insn->dalvikInsn;
    ssa_rep = mir->meta.throw_insn->ssa_rep;
    defs = ssa_rep->num_defs;
    uses = ssa_rep->num_uses;
    opcode = insn.opcode;
  } else if (opcode == kMirOpNop) {
    str.append("[");
    // Recover the original opcode.
    insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
    opcode = insn.opcode;
    nop = true;
  }

  if (opcode >= kMirOpFirst) {
    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
  } else {
    dalvik_format = Instruction::FormatOf(insn.opcode);
    flags = Instruction::FlagsOf(insn.opcode);
    str.append(Instruction::Name(insn.opcode));
  }

  if (opcode == kMirOpPhi) {
    BasicBlockId* incoming = mir->meta.phi_incoming;
    str.append(StringPrintf(" %s = (%s",
               GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
               GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
    str.append(StringPrintf(":%d", incoming[0]));
    int i;
    for (i = 1; i < uses; i++) {
      str.append(StringPrintf(", %s:%d",
                 GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
                 incoming[i]));
    }
    str.append(")");
  } else if ((flags & Instruction::kBranch) != 0) {
    // For branches, decode the instructions to print out the branch targets.
    int offset = 0;
    switch (dalvik_format) {
      case Instruction::k21t:
        str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
        offset = insn.vB;
        break;
      case Instruction::k22t:
        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
                   GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
        offset = insn.vC;
        break;
      case Instruction::k10t:
      case Instruction::k20t:
      case Instruction::k30t:
        offset = insn.vA;
        break;
      default:
        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
    }
    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
               offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
  } else {
    // For invoke-style formats, treat wide regs as a pair of singles.
    bool show_singles = ((dalvik_format == Instruction::k35c) ||
                         (dalvik_format == Instruction::k3rc));
    if (defs != 0) {
      str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str()));
      if (uses != 0) {
        str.append(", ");
      }
    }
    for (int i = 0; i < uses; i++) {
      str.append(
          StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str()));
      if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
        // For the listing, skip the high sreg.
        i++;
      }
      if (i != (uses - 1)) {
        str.append(",");
      }
    }
    switch (dalvik_format) {
      case Instruction::k11n:  // Add one immediate from vB.
      case Instruction::k21s:
      case Instruction::k31i:
      case Instruction::k21h:
        str.append(StringPrintf(", #%d", insn.vB));
        break;
      case Instruction::k51l:  // Add one wide immediate.
        str.append(StringPrintf(", #%" PRId64, insn.vB_wide));
        break;
      case Instruction::k21c:  // One register, one string/type/method index.
      case Instruction::k31c:
        str.append(StringPrintf(", index #%d", insn.vB));
        break;
      case Instruction::k22c:  // Two registers, one string/type/method index.
        str.append(StringPrintf(", index #%d", insn.vC));
        break;
      case Instruction::k22s:  // Add one immediate from vC.
      case Instruction::k22b:
        str.append(StringPrintf(", #%d", insn.vC));
        break;
      default: {
        // Nothing left to print.
      }
    }
  }
  if (nop) {
    str.append("]--optimized away");
  }
  int length = str.length() + 1;
  ret = static_cast<char*>(arena_->Alloc(length, kArenaAllocDFInfo));
  strncpy(ret, str.c_str(), length);
  return ret;
}

/* Turn method name into a legal Linux file name */
void MIRGraph::ReplaceSpecialChars(std::string& str) {
  static const struct { const char before; const char after; } match[] = {
    {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'},
    {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='}
  };
  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
  }
}

std::string MIRGraph::GetSSAName(int ssa_reg) {
  // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to
  //       the arena. We should be smarter and just place straight into the arena, or compute the
  //       value more lazily.
  return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
}

// Similar to GetSSAName, but if the ssa name represents a constant, show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
  if (reg_location_ == NULL) {
    // Pre-SSA - just use the standard name.
    return GetSSAName(ssa_reg);
  }
  if (IsConst(reg_location_[ssa_reg])) {
    if (!singles_only && reg_location_[ssa_reg].wide) {
      return StringPrintf("v%d_%d#0x%" PRIx64, SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
                          ConstantValueWide(reg_location_[ssa_reg]));
    } else {
      return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
                          ConstantValue(reg_location_[ssa_reg]));
    }
  } else {
    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
  }
}

void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
  switch (bb->block_type) {
    case kEntryBlock:
      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
      break;
    case kExitBlock:
      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
      break;
    case kDalvikByteCode:
      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
      break;
    case kExceptionHandling:
      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
               bb->id);
      break;
    default:
      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
      break;
  }
}

const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
  // TODO: for inlining support, use current code unit.
  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
  return cu_->dex_file->GetShorty(method_id.proto_idx_);
}

/* Debug Utility - dump a compilation unit */
void MIRGraph::DumpMIRGraph() {
  BasicBlock* bb;
  const char* block_type_names[] = {
    "Null Block",
    "Entry Block",
    "Code Block",
    "Exit Block",
    "Exception Handling",
    "Catch Block"
  };

  LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LOG(INFO) << cu_->insns << " insns";
  LOG(INFO) << GetNumBlocks() << " blocks in total";
  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);

  while (true) {
    bb = iterator.Next();
    if (bb == NULL) break;
    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
                              bb->id,
                              block_type_names[bb->block_type],
                              bb->start_offset,
                              bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
                              bb->last_mir_insn ? "" : " empty");
    if (bb->taken != NullBasicBlockId) {
      LOG(INFO) << "  Taken branch: block " << bb->taken
                << " (0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")";
    }
    if (bb->fall_through != NullBasicBlockId) {
      LOG(INFO) << "  Fallthrough : block " << bb->fall_through
                << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")";
    }
  }
}

/*
 * Build an array of location records for the incoming arguments.
 * Note: one location record per word of arguments, with dummy
 * high-word loc for wide arguments. Also pull up any following
 * MOVE_RESULT and incorporate it into the invoke.
 */
CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
                                   bool is_range) {
  CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
                                                        kArenaAllocMisc));
  MIR* move_result_mir = FindMoveResult(bb, mir);
  if (move_result_mir == NULL) {
    info->result.location = kLocInvalid;
  } else {
    info->result = GetRawDest(move_result_mir);
    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
  }
  info->num_arg_words = mir->ssa_rep->num_uses;
  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
      (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, kArenaAllocMisc));
  for (int i = 0; i < info->num_arg_words; i++) {
    info->args[i] = GetRawSrc(mir, i);
  }
  info->opt_flags = mir->optimization_flags;
  info->type = type;
  info->is_range = is_range;
  info->index = mir->dalvikInsn.vB;
  info->offset = mir->offset;
  info->mir = mir;
  return info;
}

// Allocate a new MIR.
MIR* MIRGraph::NewMIR() {
  MIR* mir = new (arena_) MIR();
  return mir;
}

// Allocate a new basic block.
BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
  BasicBlock* bb = new (arena_) BasicBlock();

  bb->block_type = block_type;
  bb->id = block_id;
  // TUNING: better estimate of the exit block predecessors?
  bb->predecessors = new (arena_) GrowableArray<BasicBlockId>(arena_,
                                                              (block_type == kExitBlock) ? 2048 : 2,
                                                              kGrowableArrayPredecessors);
  bb->successor_block_list_type = kNotUsed;
  block_id_map_.Put(block_id, block_id);
  return bb;
}

void MIRGraph::InitializeConstantPropagation() {
  is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
  constant_values_ =
      static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(), kArenaAllocDFInfo));
}

void MIRGraph::InitializeMethodUses() {
  // The gate starts by initializing the use counts.
  int num_ssa_regs = GetNumSSARegs();
  use_counts_.Resize(num_ssa_regs + 32);
  raw_use_counts_.Resize(num_ssa_regs + 32);
  // Initialize list.
  for (int i = 0; i < num_ssa_regs; i++) {
    use_counts_.Insert(0);
    raw_use_counts_.Insert(0);
  }
}

void MIRGraph::SSATransformationStart() {
  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
  temp_bit_vector_size_ = cu_->num_dalvik_registers;
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);

  // Update the maximum number of reachable blocks.
  max_num_reachable_blocks_ = num_reachable_blocks_;
}

void MIRGraph::SSATransformationEnd() {
  // Verify the dataflow information after the pass.
  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
    VerifyDataflow();
  }

  temp_bit_vector_size_ = 0u;
  temp_bit_vector_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

void MIRGraph::ComputeTopologicalSortOrder() {
  std::queue<BasicBlock*> q;
  std::map<int, int> visited_cnt_values;

  // Clear the nodes.
  ClearAllVisitedFlags();

  // Create the topological order if need be.
  if (topological_order_ == nullptr) {
    topological_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, 0);
  }
  topological_order_->Reset();

  // Set up visited_cnt_values for all BBs; the default value for these counters is zero.
  // Also fill the initial queue.
  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);

  while (true) {
    BasicBlock* bb = iterator.Next();

    if (bb == nullptr) {
      break;
    }

    if (bb->hidden) {
      continue;
    }

    visited_cnt_values[bb->id] = bb->predecessors->Size();

    GrowableArray<BasicBlockId>::Iterator pred_iterator(bb->predecessors);
    // To process loops we should not wait for dominators.
    while (true) {
      BasicBlock* pred_bb = GetBasicBlock(pred_iterator.Next());

      if (pred_bb == nullptr) {
        break;
      }

      if (pred_bb->dominators == nullptr || pred_bb->hidden) {
        continue;
      }

      // Skip the backward branch.
      if (pred_bb->dominators->IsBitSet(bb->id)) {
        visited_cnt_values[bb->id]--;
      }
    }

    // Add entry block to queue.
    if (visited_cnt_values[bb->id] == 0) {
      q.push(bb);
    }
  }

  while (!q.empty()) {
    // Get top.
    BasicBlock* bb = q.front();
    q.pop();

    DCHECK_EQ(bb->hidden, false);

    if (bb->IsExceptionBlock()) {
      continue;
    }

    // We've visited all the predecessors, so we can visit bb.
    if (!bb->visited) {
      bb->visited = true;

      // Now add the basic block.
      topological_order_->Insert(bb->id);

      // Decrement the visited count for all successors and enqueue any that reach zero.
      ChildBlockIterator succIter(bb, this);
      BasicBlock* successor = succIter.Next();
      while (successor != nullptr) {
        // One more predecessor has been visited.
        visited_cnt_values[successor->id]--;

        if (visited_cnt_values[successor->id] <= 0 && !successor->visited && !successor->hidden) {
          q.push(successor);
        }

        // Take the next successor.
        successor = succIter.Next();
      }
    }
  }
}

bool BasicBlock::IsExceptionBlock() const {
  return block_type == kExceptionHandling;
}

ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
    : basic_block_(bb), mir_graph_(mir_graph), visited_fallthrough_(false),
      visited_taken_(false), have_successors_(false) {
  // Check if we actually do have successors.
  if (basic_block_ != nullptr && basic_block_->successor_block_list_type != kNotUsed) {
    have_successors_ = true;
    successor_iter_.Reset(basic_block_->successor_blocks);
  }
}

BasicBlock* ChildBlockIterator::Next() {
  // If we don't have a basic block, we cannot get the next child.
  if (basic_block_ == nullptr) {
    return nullptr;
  }

  // If we haven't visited the fallthrough child yet, return it.
  if (!visited_fallthrough_) {
    visited_fallthrough_ = true;

    BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->fall_through);
    if (result != nullptr) {
      return result;
    }
  }

  // If we haven't visited the taken child yet, return it.
  if (!visited_taken_) {
    visited_taken_ = true;

    BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->taken);
    if (result != nullptr) {
      return result;
    }
  }

  // We visited both taken and fallthrough. Now check if we have successors we need to visit.
  if (have_successors_) {
    // Get information about the next successor block, if any.
    SuccessorBlockInfo* successor_block_info = successor_iter_.Next();

    if (successor_block_info != nullptr) {
      return mir_graph_->GetBasicBlock(successor_block_info->block);
    }
  }

  // We do not have anything.
  return nullptr;
}
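
// Illustrative usage (hypothetical 'bb' and 'mir_graph' variables): the iterator
// yields the fall-through child, then the taken child, then any successor-list
// entries, which is how ComputeTopologicalSortOrder() above walks children.
//
//   ChildBlockIterator child_iter(bb, mir_graph);
//   for (BasicBlock* child = child_iter.Next(); child != nullptr;
//        child = child_iter.Next()) {
//     // Visit child.
//   }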
MIR* MIR::Copy(MIRGraph* mir_graph) {
  MIR* res = mir_graph->NewMIR();
  *res = *this;

  // Remove links.
  res->next = nullptr;
  res->bb = NullBasicBlockId;
  res->ssa_rep = nullptr;

  return res;
}

MIR* MIR::Copy(CompilationUnit* c_unit) {
  return Copy(c_unit->mir_graph.get());
}

uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
  // Default result.
  uint32_t res = 0;

  // The *put instructions use their leading operand(s) for the value being
  // stored; skip those so the remaining uses line up with the *get counterparts.
  switch (opcode) {
    case Instruction::IPUT:
    case Instruction::IPUT_OBJECT:
    case Instruction::IPUT_BOOLEAN:
    case Instruction::IPUT_BYTE:
    case Instruction::IPUT_CHAR:
    case Instruction::IPUT_SHORT:
    case Instruction::IPUT_QUICK:
    case Instruction::IPUT_OBJECT_QUICK:
    case Instruction::APUT:
    case Instruction::APUT_OBJECT:
    case Instruction::APUT_BOOLEAN:
    case Instruction::APUT_BYTE:
    case Instruction::APUT_CHAR:
    case Instruction::APUT_SHORT:
    case Instruction::SPUT:
    case Instruction::SPUT_OBJECT:
    case Instruction::SPUT_BOOLEAN:
    case Instruction::SPUT_BYTE:
    case Instruction::SPUT_CHAR:
    case Instruction::SPUT_SHORT:
      // Skip the VR containing what to store.
      res = 1;
      break;
    case Instruction::IPUT_WIDE:
    case Instruction::IPUT_WIDE_QUICK:
    case Instruction::APUT_WIDE:
    case Instruction::SPUT_WIDE:
      // Skip the two VRs containing what to store.
      res = 2;
      break;
    default:
      // Do nothing in the general case.
      break;
  }

  return res;
}

/**
 * @brief Check whether a decoded instruction sets a constant and, if it does,
 * provide more information about the constant being set.
 * @param ptr_value pointer to a 64-bit holder for the constant.
 * @param wide Updated by the function to indicate whether the bytecode sets a wide constant.
 * @return Returns false if the decoded instruction does not set a constant.
 */
bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const {
  bool sets_const = true;
  int64_t value = vB;

  DCHECK(ptr_value != nullptr);
  DCHECK(wide != nullptr);

  switch (opcode) {
    case Instruction::CONST_4:
    case Instruction::CONST_16:
    case Instruction::CONST:
      *wide = false;
      value <<= 32;  // In order to get the sign extension.
      value >>= 32;
      break;
    case Instruction::CONST_HIGH16:
      *wide = false;
      value <<= 48;  // In order to get the sign extension.
      value >>= 32;
      break;
    case Instruction::CONST_WIDE_16:
    case Instruction::CONST_WIDE_32:
      *wide = true;
      value <<= 32;  // In order to get the sign extension.
      value >>= 32;
      break;
    case Instruction::CONST_WIDE:
      *wide = true;
      value = vB_wide;
      break;
    case Instruction::CONST_WIDE_HIGH16:
      *wide = true;
      value <<= 48;  // In order to get the sign extension.
      break;
    default:
      sets_const = false;
      break;
  }

  if (sets_const) {
    *ptr_value = value;
  }

  return sets_const;
}
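// Worked example for the shift-based sign extension above (illustration only):
// for CONST_HIGH16 with vB == 0x8000, value starts as 0x0000000000008000;
// value <<= 48 gives 0x8000000000000000, and the arithmetic value >>= 32
// yields 0xFFFFFFFF80000000, i.e. the 32-bit constant 0x80000000 (vB << 16)
// sign-extended into the 64-bit holder.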
void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
  // Reset flags for all MIRs in bb.
  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
    mir->optimization_flags &= (~reset_flags);
  }
}

void BasicBlock::Hide(CompilationUnit* c_unit) {
  // First make this a dalvik bytecode block so it no longer has any special meaning.
  block_type = kDalvikByteCode;

  // Mark it as hidden.
  hidden = true;

  // Detach it from its MIRs so we don't generate code for them. Also, detached MIRs
  // are updated to know that they no longer have a parent.
  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
    mir->bb = NullBasicBlockId;
  }
  first_mir_insn = nullptr;
  last_mir_insn = nullptr;

  GrowableArray<BasicBlockId>::Iterator iterator(predecessors);

  MIRGraph* mir_graph = c_unit->mir_graph.get();
  while (true) {
    BasicBlock* pred_bb = mir_graph->GetBasicBlock(iterator.Next());
    if (pred_bb == nullptr) {
      break;
    }

    // Sadly we have to go through the children by hand here.
    pred_bb->ReplaceChild(id, NullBasicBlockId);
  }

  // Iterate through the children of the block we are hiding.
  ChildBlockIterator successorChildIter(this, mir_graph);

  for (BasicBlock* childPtr = successorChildIter.Next(); childPtr != nullptr;
       childPtr = successorChildIter.Next()) {
    // Remove the hidden block from the child's predecessor list.
    childPtr->predecessors->Delete(id);
  }
}

bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
  // In order to determine if the ssa reg is live out, we scan all the MIRs. We remember
  // the last SSA number defined for the same dalvik register. At the end, if it differs
  // from ssa_reg, then ssa_reg is not live out of this BB.
  int dalvik_reg = c_unit->mir_graph->SRegToVReg(ssa_reg);

  int last_ssa_reg = -1;

  // Walk through the MIRs in order, keeping the most recent def.
  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
    // Get the ssa rep.
    SSARepresentation* ssa_rep = mir->ssa_rep;

    // Go through the defines for this MIR.
    for (int i = 0; i < ssa_rep->num_defs; i++) {
      DCHECK(ssa_rep->defs != nullptr);

      // Get the ssa reg.
      int def_ssa_reg = ssa_rep->defs[i];

      // Get the dalvik reg.
      int def_dalvik_reg = c_unit->mir_graph->SRegToVReg(def_ssa_reg);

      // Compare dalvik regs.
      if (dalvik_reg == def_dalvik_reg) {
        // We found a def of the register we are being asked about; remember it.
        last_ssa_reg = def_ssa_reg;
      }
    }
  }

  if (last_ssa_reg == -1) {
    // We could not find any def of the register the caller asked about.
    // Assume the caller knows what it is doing and, to be safe, report the
    // register as live out.
    return true;
  }

  // We found a def; ssa_reg is live out only if it is the last one.
  return (ssa_reg == last_ssa_reg);
}
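// Worked example for IsSSALiveOut() (hypothetical block, illustration only):
// if a block defines v0 twice, first as ssa reg v0_1 and later as v0_2, the
// scan leaves last_ssa_reg == v0_2, so IsSSALiveOut(cu, v0_1) returns false
// while IsSSALiveOut(cu, v0_2) returns true. For a register with no def in
// the block at all, the function conservatively returns true.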
bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
  // We need to check taken, fall_through, and successor_blocks to replace.
  bool found = false;
  if (taken == old_bb) {
    taken = new_bb;
    found = true;
  }

  if (fall_through == old_bb) {
    fall_through = new_bb;
    found = true;
  }

  if (successor_block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
    while (true) {
      SuccessorBlockInfo* successor_block_info = iterator.Next();
      if (successor_block_info == nullptr) {
        break;
      }
      if (successor_block_info->block == old_bb) {
        successor_block_info->block = new_bb;
        found = true;
      }
    }
  }

  return found;
}

void BasicBlock::UpdatePredecessor(BasicBlockId old_parent, BasicBlockId new_parent) {
  GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
  bool found = false;

  while (true) {
    BasicBlockId pred_bb_id = iterator.Next();

    if (pred_bb_id == NullBasicBlockId) {
      break;
    }

    if (pred_bb_id == old_parent) {
      size_t idx = iterator.GetIndex() - 1;
      predecessors->Put(idx, new_parent);
      found = true;
      break;
    }
  }

  // If the old parent was not found, add the new one.
  if (found == false) {
    predecessors->Insert(new_parent);
  }
}

// Create a new basic block whose block_id is num_blocks_, which is post-incremented.
BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
  BasicBlock* res = NewMemBB(block_type, num_blocks_++);
  block_list_.Insert(res);
  return res;
}

void MIRGraph::CalculateBasicBlockInformation() {
  PassDriverMEPostOpt driver(cu_);
  driver.Launch();
}

void MIRGraph::InitializeBasicBlockData() {
  num_blocks_ = block_list_.Size();
}

}  // namespace art