mir_graph.cc revision 8b2c0b9abc3f520495f4387ea040132ba85cae69
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/stl_util.h"
#include "compiler_internals.h"
#include "dex_file-inl.h"
#include "leb128.h"
#include "mir_graph.h"

namespace art {

#define MAX_PATTERN_LEN 5

/*
 * An opcode sequence (up to MAX_PATTERN_LEN entries, unused tail entries are
 * zero-initialized) paired with the special-case handler to use when an
 * entire method matches the sequence.
 */
struct CodePattern {
  const Instruction::Code opcodes[MAX_PATTERN_LEN];
  const SpecialCaseHandler handler_code;
};

/*
 * Table of trivial-method patterns recognized during parsing (see the
 * pattern-matching loop in InlineMethod).  A method whose full instruction
 * stream matches one of these rows is flagged with the corresponding
 * special-case handler code.
 */
static const CodePattern special_patterns[] = {
  {{Instruction::RETURN_VOID}, kNullMethod},
  {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
  {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
  {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
  {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
  {{Instruction::IGET, Instruction::RETURN}, kIGet},
  {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
  {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
  {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
  {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
  {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
  {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
  {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
  {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
  {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
  {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
  {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
  {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
  {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
  {{Instruction::RETURN}, kIdentity},
  {{Instruction::RETURN_OBJECT}, kIdentity},
  {{Instruction::RETURN_WIDE}, kIdentity},
};

// Printable names for the extended (non-Dex) MIR opcodes.  Indexed by
// (opcode - kMirOpFirst), so the order here must match the kMirOp* enum.
const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
  "Phi",
  "Copy",
  "FusedCmplFloat",
  "FusedCmpgFloat",
  "FusedCmplDouble",
  "FusedCmpgDouble",
  "FusedCmpLong",
  "Nop",
  "OpNullCheck",
  "OpRangeCheck",
  "OpDivZeroCheck",
  "Check1",
  "Check2",
  "Select",
};

MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
    : reg_location_(NULL),
      compiler_temps_(arena, 6, kGrowableArrayMisc),
      cu_(cu),
      ssa_base_vregs_(NULL),
      ssa_subscripts_(NULL),
      vreg_to_ssa_map_(NULL),
      ssa_last_defs_(NULL),
      is_constant_v_(NULL),
      constant_values_(NULL),
      use_counts_(arena, 256, kGrowableArrayMisc),
      raw_use_counts_(arena, 256, kGrowableArrayMisc),
      num_reachable_blocks_(0),
      dfs_order_(NULL),
      dfs_post_order_(NULL),
      dom_post_order_traversal_(NULL),
      i_dom_list_(NULL),
      def_block_matrix_(NULL),
      temp_block_v_(NULL),
      temp_dalvik_register_v_(NULL),
      temp_ssa_register_v_(NULL),
      block_list_(arena, 100, kGrowableArrayBlockList),
      try_block_addr_(NULL),
      entry_block_(NULL),
      exit_block_(NULL),
      cur_block_(NULL),
      num_blocks_(0),
      current_code_item_(NULL),
      block_map_(arena, 0, kGrowableArrayMisc),
      current_method_(kInvalidEntry),
      current_offset_(kInvalidEntry),
      def_count_(0),
      opcode_count_(NULL),
      num_ssa_regs_(0),
      method_sreg_(0),
      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
      checkstats_(NULL),
      special_case_(kNoHandler),
      arena_(arena) {
  // Expandable bitmap of code offsets covered by try ranges; arena-allocated,
  // so it is released with the arena rather than in the destructor.
  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
}

MIRGraph::~MIRGraph() {
  // m_units_ elements are heap-allocated (see InlineMethod); everything else
  // lives in the arena.
  STLDeleteElements(&m_units_);
}

/*
 * Parse an instruction, return the length of the instruction in code units.
 *
 * code_ptr:            points at the raw code unit to decode.
 * decoded_instruction: out-param receiving the decoded form.
 */
int MIRGraph::ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction) {
  const Instruction* instruction = Instruction::At(code_ptr);
  *decoded_instruction = DecodedInstruction(instruction);

  return instruction->SizeInCodeUnits();
}


/*
 * Split an existing block from the specified code offset into two, returning
 * the new "bottom" block that starts at code_offset.  orig_block keeps the
 * instructions before code_offset; edges, successor lists and predecessor
 * lists are rewired accordingly.
 */
BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
  // Find the instruction at the split point; it must exist in orig_block.
  MIR* insn = orig_block->first_mir_insn;
  while (insn) {
    if (insn->offset == code_offset) break;
    insn = insn->next;
  }
  if (insn == NULL) {
    LOG(FATAL) << "Break split failed";
  }
  BasicBlock *bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(bottom_block);

  bottom_block->start_offset = code_offset;
  bottom_block->first_mir_insn = insn;
  bottom_block->last_mir_insn = orig_block->last_mir_insn;

  /* If this block was terminated by a return, the flag needs to go with the bottom block */
  bottom_block->terminated_by_return = orig_block->terminated_by_return;
  orig_block->terminated_by_return = false;

  /* Add it to the quick lookup cache */
  block_map_.Put(bottom_block->start_offset, bottom_block);

  /* Handle the taken path */
  bottom_block->taken = orig_block->taken;
  if (bottom_block->taken) {
    orig_block->taken = NULL;
    bottom_block->taken->predecessors->Delete(orig_block);
    bottom_block->taken->predecessors->Insert(bottom_block);
  }

  /* Handle the fallthrough path */
  bottom_block->fall_through = orig_block->fall_through;
  orig_block->fall_through = bottom_block;
  bottom_block->predecessors->Insert(orig_block);
  if (bottom_block->fall_through) {
    bottom_block->fall_through->predecessors->Delete(orig_block);
    bottom_block->fall_through->predecessors->Insert(bottom_block);
  }

  /* Handle the successor list (switch cases / catch handlers move down) */
  if (orig_block->successor_block_list.block_list_type != kNotUsed) {
    bottom_block->successor_block_list = orig_block->successor_block_list;
    orig_block->successor_block_list.block_list_type = kNotUsed;
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_block_list.blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) break;
      BasicBlock *bb = successor_block_info->block;
      bb->predecessors->Delete(orig_block);
      bb->predecessors->Insert(bottom_block);
    }
  }

  orig_block->last_mir_insn = insn->prev;

  // Cut the MIR chain between the two halves.
  insn->prev->next = NULL;
  insn->prev = NULL;
  /*
   * Update the immediate predecessor block pointer so that outgoing edges
   * can be applied to the proper block.
   */
  if (immed_pred_block_p) {
    DCHECK_EQ(*immed_pred_block_p, orig_block);
    *immed_pred_block_p = bottom_block;
  }
  return bottom_block;
}

/*
 * Given a code offset, find out the block that starts with it. If the offset
 * is in the middle of an existing block, split it into two. If immed_pred_block_p
 * is non-null and is the block being split, update *immed_pred_block_p to
 * point to the bottom block so that outgoing edges can be set up properly
 * (by the caller)
 * Utilizes a map for fast lookup of the typical cases.
 */
BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
                                BasicBlock** immed_pred_block_p) {
  BasicBlock* bb;
  unsigned int i;

  // Offsets past the end of the method never get a block.
  if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
    return NULL;
  }
  // Fast path: a block already starts at this offset.
  bb = block_map_.Get(code_offset);
  if ((bb != NULL) || !create) {
    return bb;
  }

  if (split) {
    // Scan existing Dalvik blocks for one that covers this offset internally.
    for (i = block_list_.Size(); i > 0; i--) {
      bb = block_list_.Get(i - 1);
      if (bb->block_type != kDalvikByteCode) continue;
      /* Check if a branch jumps into the middle of an existing block */
      if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
          (code_offset <= bb->last_mir_insn->offset)) {
        // NOTE(review): immed_pred_block_p is dereferenced here; callers that
        // pass split == true must supply a non-null pointer.
        BasicBlock *new_bb = SplitBlock(code_offset, bb, bb == *immed_pred_block_p ?
                                        immed_pred_block_p : NULL);
        return new_bb;
      }
    }
  }

  /* Create a new one */
  bb = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(bb);
  bb->start_offset = code_offset;
  block_map_.Put(bb->start_offset, bb);
  return bb;
}

/* Identify code range in try blocks and set up the empty catch blocks */
void MIRGraph::ProcessTryCatchBlocks() {
  int tries_size = current_code_item_->tries_size_;
  int offset;

  if (tries_size == 0) {
    return;
  }

  // Mark every code-unit offset covered by a try item in try_block_addr_.
  for (int i = 0; i < tries_size; i++) {
    const DexFile::TryItem* pTry =
        DexFile::GetTryItems(*current_code_item_, i);
    int start_offset = pTry->start_addr_;
    int end_offset = start_offset + pTry->insn_count_;
    for (offset = start_offset; offset < end_offset; offset++) {
      try_block_addr_->SetBit(offset);
    }
  }

  // Iterate over each of the handlers to enqueue the empty Catch blocks
  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
  for (uint32_t idx = 0; idx < handlers_size; idx++) {
    CatchHandlerIterator iterator(handlers_ptr);
    for (; iterator.HasNext(); iterator.Next()) {
      uint32_t address = iterator.GetHandlerAddress();
      // Pre-create (without splitting) a block at each handler address so
      // ProcessCanThrow can later look it up with create == false.
      FindBlock(address, false /* split */, true /*create*/,
                /* immed_pred_block_p */ NULL);
    }
    handlers_ptr = iterator.EndDataPointer();
  }
}

/*
 * Process instructions with the kBranch flag: compute the branch target,
 * wire up the taken edge (and fall-through edge for conditional branches),
 * and return the (possibly updated) current block.
 */
BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                                       int flags, const uint16_t* code_ptr,
                                       const uint16_t* code_end) {
  int target = cur_offset;
  // The branch displacement lives in a different vreg field per format.
  switch (insn->dalvikInsn.opcode) {
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      target += insn->dalvikInsn.vA;
      break;
    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vC;
      break;
    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vB;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
  }
  BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                      /* immed_pred_block_p */ &cur_block);
  cur_block->taken = taken_block;
  taken_block->predecessors->Insert(cur_block);

  /* Always terminate the current block for conditional branches */
  if (flags & Instruction::kContinue) {
    BasicBlock *fallthrough_block = FindBlock(cur_offset + width,
                                              /*
                                               * If the method is processed
                                               * in sequential order from the
                                               * beginning, we don't need to
                                               * specify split for continue
                                               * blocks. However, this
                                               * routine can be called by
                                               * compileLoop, which starts
                                               * parsing the method from an
                                               * arbitrary address in the
                                               * method body.
                                               */
                                              true,
                                              /* create */
                                              true,
                                              /* immed_pred_block_p */
                                              &cur_block);
    cur_block->fall_through = fallthrough_block;
    fallthrough_block->predecessors->Insert(cur_block);
  } else if (code_ptr < code_end) {
    // Unconditional branch with code following: start a new block there so
    // the unreachable-until-targeted code gets its own block.
    FindBlock(cur_offset + width, /* split */ false, /* create */ true,
              /* immed_pred_block_p */ NULL);
  }
  return cur_block;
}

/* Process instructions with the kSwitch flag */
void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                                int flags) {
  // vB holds the relative offset from the switch opcode to its data payload.
  const uint16_t* switch_data =
      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
  int size;
  const int* keyTable;
  const int* target_table;
  int i;
  int first_key;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kPackedSwitchSignature));
    size = switch_data[1];
    first_key = switch_data[2] | (switch_data[3] << 16);
    target_table = reinterpret_cast<const int*>(&switch_data[4]);
    keyTable = NULL;            // Make the compiler happy
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  } else {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kSparseSwitchSignature));
    size = switch_data[1];
    keyTable = reinterpret_cast<const int*>(&switch_data[2]);
    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
    first_key = 0;              // To make the compiler happy
  }

  if (cur_block->successor_block_list.block_list_type != kNotUsed) {
    LOG(FATAL) << "Successor block list already in use: "
               << static_cast<int>(cur_block->successor_block_list.block_list_type);
  }
  cur_block->successor_block_list.block_list_type =
      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
      kPackedSwitch : kSparseSwitch;
  cur_block->successor_block_list.blocks =
      new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);

  // One successor edge per case; the key is synthesized for packed switches
  // and read from the key table for sparse switches.
  for (i = 0; i < size; i++) {
    BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
    SuccessorBlockInfo *successor_block_info =
        static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                       ArenaAllocator::kAllocSuccessor));
    successor_block_info->block = case_block;
    successor_block_info->key =
        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
        first_key + i : keyTable[i];
    cur_block->successor_block_list.blocks->Insert(successor_block_info);
    case_block->predecessors->Insert(cur_block);
  }

  /* Fall-through case */
  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
                                            /* create */ true, /* immed_pred_block_p */ NULL);
  cur_block->fall_through = fallthrough_block;
  fallthrough_block->predecessors->Insert(cur_block);
}

/* Process instructions with the kThrow flag */
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                                      int flags, ArenaBitVector* try_block_addr,
                                      const uint16_t* code_ptr, const uint16_t* code_end) {
  bool in_try_block = try_block_addr->IsBitSet(cur_offset);

  /* In try block: add an exception edge to every covering catch handler */
  if (in_try_block) {
    CatchHandlerIterator iterator(*current_code_item_, cur_offset);

    if (cur_block->successor_block_list.block_list_type != kNotUsed) {
      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      LOG(FATAL) << "Successor block list already in use: "
                 << static_cast<int>(cur_block->successor_block_list.block_list_type);
    }

    cur_block->successor_block_list.block_list_type = kCatch;
    cur_block->successor_block_list.blocks =
        new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);

    for (; iterator.HasNext(); iterator.Next()) {
      // Handler blocks were pre-created by ProcessTryCatchBlocks, so
      // create == false here is safe.
      BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
                                          false /* create */, NULL /* immed_pred_block_p */);
      catch_block->catch_entry = true;
      if (kIsDebugBuild) {
        catches_.insert(catch_block->start_offset);
      }
      SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
          (arena_->Alloc(sizeof(SuccessorBlockInfo), ArenaAllocator::kAllocSuccessor));
      successor_block_info->block = catch_block;
      successor_block_info->key = iterator.GetHandlerTypeIndex();
      cur_block->successor_block_list.blocks->Insert(successor_block_info);
      catch_block->predecessors->Insert(cur_block);
    }
  } else {
    // Not covered by a try: the exception edge goes to a synthetic
    // exception-handling block.
    BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
    cur_block->taken = eh_block;
    block_list_.Insert(eh_block);
    eh_block->start_offset = cur_offset;
    eh_block->predecessors->Insert(cur_block);
  }

  if (insn->dalvikInsn.opcode == Instruction::THROW) {
    cur_block->explicit_throw = true;
    if (code_ptr < code_end) {
      // Force creation of new block following THROW via side-effect
      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                /* immed_pred_block_p */ NULL);
    }
    if (!in_try_block) {
      // Don't split a THROW that can't rethrow - we're done.
      return cur_block;
    }
  }

  /*
   * Split the potentially-throwing instruction into two parts.
   * The first half will be a pseudo-op that captures the exception
   * edges and terminates the basic block. It always falls through.
   * Then, create a new basic block that begins with the throwing instruction
   * (minus exceptions). Note: this new basic block must NOT be entered into
   * the block_map. If the potentially-throwing instruction is the target of a
   * future branch, we need to find the check pseudo half. The new
   * basic block containing the work portion of the instruction should
   * only be entered via fallthrough from the block containing the
   * pseudo exception edge MIR. Note also that this new block is
   * not automatically terminated after the work portion, and may
   * contain following instructions.
   */
  BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(new_block);
  new_block->start_offset = insn->offset;
  cur_block->fall_through = new_block;
  new_block->predecessors->Insert(cur_block);
  MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
  *new_insn = *insn;
  // The original MIR becomes the check pseudo-op; the copy does the work.
  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
  // Associate the two halves
  insn->meta.throw_insn = new_insn;
  new_insn->meta.throw_insn = insn;
  AppendMIR(new_block, new_insn);
  return new_block;
}

/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                            InvokeType invoke_type, uint16_t class_def_idx,
                            uint32_t method_idx, jobject class_loader, const DexFile& dex_file) {
  current_code_item_ = code_item;
  // Save the caller's position so nested parsing could be resumed.
  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
  current_method_ = m_units_.size();
  current_offset_ = 0;
  // TODO: will need to snapshot stack image and use that as the mir context identification.
  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags));
  const uint16_t* code_ptr = current_code_item_->insns_;
  const uint16_t* code_end =
      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;

  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
  block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
  block_map_.SetSize(block_map_.Size() + current_code_item_->insns_size_in_code_units_);

  // TODO: replace with explicit resize routine. Using automatic extension side effect for now.
  // Touch one bit past the method end so the expandable bitmap grows to
  // cover every valid offset (see TODO above about an explicit resize).
  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);

  // If this is the first method, set up default entry and exit blocks.
  if (current_method_ == 0) {
    DCHECK(entry_block_ == NULL);
    DCHECK(exit_block_ == NULL);
    DCHECK_EQ(num_blocks_, 0);
    entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
    block_list_.Insert(entry_block_);
    block_list_.Insert(exit_block_);
    // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
    cu_->dex_file = &dex_file;
    cu_->class_def_idx = class_def_idx;
    cu_->method_idx = method_idx;
    cu_->access_flags = access_flags;
    cu_->invoke_type = invoke_type;
    cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
    cu_->num_ins = current_code_item_->ins_size_;
    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
    cu_->num_outs = current_code_item_->outs_size_;
    cu_->num_dalvik_registers = current_code_item_->registers_size_;
    cu_->insns = current_code_item_->insns_;
    cu_->code_item = current_code_item_;
  } else {
    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
    /*
     * Will need to manage storage for ins & outs, push previous state and update
     * insert point.
     */
  }

  /* Current block to record parsed instructions */
  BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  DCHECK_EQ(current_offset_, 0);
  cur_block->start_offset = current_offset_;
  block_list_.Insert(cur_block);
  /* Add first block to the fast lookup cache */
  // FIXME: block map needs association with offset/method pair rather than just offset
  block_map_.Put(cur_block->start_offset, cur_block);
  // FIXME: this needs to insert at the insert point rather than entry block.
  entry_block_->fall_through = cur_block;
  cur_block->predecessors->Insert(entry_block_);

  /* Identify code range in try blocks and set up the empty catch blocks */
  ProcessTryCatchBlocks();

  /* Set up for simple method detection */
  int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
  bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
  bool* dead_pattern =
      static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_patterns, ArenaAllocator::kAllocMisc));
  int pattern_pos = 0;

  /* Parse all instructions and put them into containing basic blocks */
  while (code_ptr < code_end) {
    MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
    insn->offset = current_offset_;
    insn->m_unit_index = current_method_;
    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
    insn->width = width;
    Instruction::Code opcode = insn->dalvikInsn.opcode;
    if (opcode_count_ != NULL) {
      opcode_count_[static_cast<int>(opcode)]++;
    }


    /*
     * Possible simple method?  Match the opcode at the current position
     * against each still-live pattern; a pattern dies on its first
     * mismatch.  special_case_ holds the handler of the last pattern that
     * is still matching.
     */
    if (live_pattern) {
      live_pattern = false;
      special_case_ = kNoHandler;
      for (int i = 0; i < num_patterns; i++) {
        if (!dead_pattern[i]) {
          if (special_patterns[i].opcodes[pattern_pos] == opcode) {
            live_pattern = true;
            special_case_ = special_patterns[i].handler_code;
          } else {
            dead_pattern[i] = true;
          }
        }
      }
      pattern_pos++;
    }

    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);

    int df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];

    if (df_flags & DF_HAS_DEFS) {
      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
    }

    // Check for inline data block signatures
    if (opcode == Instruction::NOP) {
      // A simple NOP will have a width of 1 at this point, embedded data NOP > 1.
      if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
        // Could be an aligning nop. If an embedded data NOP follows, treat pair as single unit.
        uint16_t following_raw_instruction = code_ptr[1];
        if ((following_raw_instruction == Instruction::kSparseSwitchSignature) ||
            (following_raw_instruction == Instruction::kPackedSwitchSignature) ||
            (following_raw_instruction == Instruction::kArrayDataSignature)) {
          width += Instruction::At(code_ptr + 1)->SizeInCodeUnits();
        }
      }
      if (width == 1) {
        // It is a simple nop - treat normally.
        AppendMIR(cur_block, insn);
      } else {
        DCHECK(cur_block->fall_through == NULL);
        DCHECK(cur_block->taken == NULL);
        // Unreachable instruction, mark for no continuation.
        flags &= ~Instruction::kContinue;
      }
    } else {
      AppendMIR(cur_block, insn);
    }

    code_ptr += width;

    if (flags & Instruction::kBranch) {
      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
                                   width, flags, code_ptr, code_end);
    } else if (flags & Instruction::kReturn) {
      cur_block->terminated_by_return = true;
      cur_block->fall_through = exit_block_;
      exit_block_->predecessors->Insert(cur_block);
      /*
       * Terminate the current block if there are instructions
       * afterwards.
       */
      if (code_ptr < code_end) {
        /*
         * Create a fallthrough block for real instructions
         * (incl. NOP).
         */
        FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
                  /* immed_pred_block_p */ NULL);
      }
    } else if (flags & Instruction::kThrow) {
      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
                                  code_ptr, code_end);
    } else if (flags & Instruction::kSwitch) {
      ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
    }
    current_offset_ += width;
    BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
                                       false, /* immed_pred_block_p */ NULL);
    if (next_block) {
      /*
       * The next instruction could be the target of a previously parsed
       * forward branch so a block is already created. If the current
       * instruction is not an unconditional branch, connect them through
       * the fall-through link.
       */
      DCHECK(cur_block->fall_through == NULL ||
             cur_block->fall_through == next_block ||
             cur_block->fall_through == exit_block_);

      if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
        cur_block->fall_through = next_block;
        next_block->predecessors->Insert(cur_block);
      }
      cur_block = next_block;
    }
  }
  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/1_post_parse_cfg/", true);
  }

  if (cu_->verbose) {
    DumpMIRGraph();
  }
}

// Log a count of every Dex opcode seen; only valid when opcode counting was
// enabled (opcode_count_ allocated).
void MIRGraph::ShowOpcodeStats() {
  DCHECK(opcode_count_ != NULL);
  LOG(INFO) << "Opcode Count";
  for (int i = 0; i < kNumPackedOpcodes; i++) {
    if (opcode_count_[i] != 0) {
      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
                << " " << opcode_count_[i];
    }
  }
}

// TODO: use a configurable base prefix, and adjust callers to supply pass name.
/*
 * Dump the CFG into a DOT graph.  Writes a .dot file named after the method
 * under dir_prefix; silently returns if the file cannot be opened.  When
 * all_blocks is false, only blocks reachable per dfs_order_ are emitted.
 */
void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) {
  FILE* file;
  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
  ReplaceSpecialChars(fname);
  fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
                       GetEntryBlock()->fall_through->start_offset);
  file = fopen(fname.c_str(), "w");
  if (file == NULL) {
    return;
  }
  fprintf(file, "digraph G {\n");

  fprintf(file, "  rankdir=TB\n");

  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
  int idx;

  for (idx = 0; idx < num_blocks; idx++) {
    int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
    BasicBlock *bb = GetBasicBlock(block_idx);
    if (bb == NULL) break;
    if (bb->block_type == kDead) continue;
    if (bb->block_type == kEntryBlock) {
      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kExitBlock) {
      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kDalvikByteCode) {
      // Dalvik blocks render as a record node listing each MIR.
      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
              bb->start_offset, bb->id);
      const MIR *mir;
      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
              bb->first_mir_insn ? " | " : " ");
      for (mir = bb->first_mir_insn; mir; mir = mir->next) {
        int opcode = mir->dalvikInsn.opcode;
        fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
                mir->ssa_rep ? GetDalvikDisassembly(mir) :
                (opcode < kMirOpFirst) ? Instruction::Name(mir->dalvikInsn.opcode) :
                extended_mir_op_names_[opcode - kMirOpFirst],
                (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                mir->next ? " | " : " ");
      }
      fprintf(file, "  }\"];\n\n");
    } else if (bb->block_type == kExceptionHandling) {
      char block_name[BLOCK_NAME_LEN];

      GetBlockName(bb, block_name);
      fprintf(file, "  %s [shape=invhouse];\n", block_name);
    }

    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];

    // Taken edges are dotted, fall-through edges solid.
    if (bb->taken) {
      GetBlockName(bb, block_name1);
      GetBlockName(bb->taken, block_name2);
      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
              block_name1, block_name2);
    }
    if (bb->fall_through) {
      GetBlockName(bb, block_name1);
      GetBlockName(bb->fall_through, block_name2);
      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
    }

    if (bb->successor_block_list.block_list_type != kNotUsed) {
      // Successor lists (switch cases / catch handlers) get an auxiliary
      // record node listing key -> target-offset pairs.
      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
              bb->start_offset, bb->id,
              (bb->successor_block_list.block_list_type == kCatch) ?
              "Mrecord" : "record");
      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
      SuccessorBlockInfo *successor_block_info = iterator.Next();

      int succ_id = 0;
      while (true) {
        if (successor_block_info == NULL) break;

        BasicBlock *dest_block = successor_block_info->block;
        SuccessorBlockInfo *next_successor_block_info = iterator.Next();

        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                succ_id++,
                successor_block_info->key,
                dest_block->start_offset,
                (next_successor_block_info != NULL) ? " | " : " ");

        successor_block_info = next_successor_block_info;
      }
      fprintf(file, "  }\"];\n\n");

      GetBlockName(bb, block_name1);
      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
              block_name1, bb->start_offset, bb->id);

      if (bb->successor_block_list.block_list_type == kPackedSwitch ||
          bb->successor_block_list.block_list_type == kSparseSwitch) {
        // For switches, also draw an edge from each record field to its
        // destination block.
        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);

        succ_id = 0;
        while (true) {
          SuccessorBlockInfo *successor_block_info = iter.Next();
          if (successor_block_info == NULL) break;

          BasicBlock *dest_block = successor_block_info->block;

          GetBlockName(dest_block, block_name2);
          fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
                  bb->id, succ_id++, block_name2);
        }
      }
    }
    fprintf(file, "\n");

    if (cu_->verbose) {
      /* Display the dominator tree */
      GetBlockName(bb, block_name1);
      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
              block_name1, block_name1);
      if (bb->i_dom) {
        GetBlockName(bb->i_dom, block_name2);
        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
      }
    }
  }
  fprintf(file, "}\n");
  fclose(file);
}

/* Insert an MIR instruction to the end of a basic block */
void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir) {
  if (bb->first_mir_insn == NULL) {
    DCHECK(bb->last_mir_insn == NULL);
    bb->last_mir_insn = bb->first_mir_insn = mir;
    mir->prev = mir->next = NULL;
  } else {
    bb->last_mir_insn->next = mir;
    mir->prev = bb->last_mir_insn;
    mir->next = NULL;
    bb->last_mir_insn = mir;
  }
}

/* Insert an MIR instruction to the head of a basic block */
void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir) {
  if (bb->first_mir_insn == NULL) {
    DCHECK(bb->last_mir_insn == NULL);
    bb->last_mir_insn = bb->first_mir_insn = mir;
    mir->prev = mir->next = NULL;
  } else {
    bb->first_mir_insn->prev = mir;
    mir->next = bb->first_mir_insn;
    mir->prev = NULL;
    bb->first_mir_insn = mir;
  }
}

/* Insert a MIR instruction after the specified MIR, updating the block tail if needed */
void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) {
  new_mir->prev = current_mir;
  new_mir->next = current_mir->next;
  current_mir->next = new_mir;

  if (new_mir->next) {
    /* Is not the last MIR in the block */
    new_mir->next->prev = new_mir;
  } else {
    /* Is the last MIR in the block */
    bb->last_mir_insn = new_mir;
  }
}

// Build a human-readable disassembly string for one MIR.  The returned
// buffer is arena-allocated (presumably — the allocation happens past the
// end of this view; verify against the remainder of the function).
char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
  DecodedInstruction insn = mir->dalvikInsn;
  std::string str;
  int flags = 0;
  int opcode = insn.opcode;
  char* ret;
  bool nop = false;
  SSARepresentation* ssa_rep = mir->ssa_rep;
  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;

  // Handle special cases.
896 if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) { 897 str.append(extended_mir_op_names_[opcode - kMirOpFirst]); 898 str.append(": "); 899 // Recover the original Dex instruction 900 insn = mir->meta.throw_insn->dalvikInsn; 901 ssa_rep = mir->meta.throw_insn->ssa_rep; 902 defs = ssa_rep->num_defs; 903 uses = ssa_rep->num_uses; 904 opcode = insn.opcode; 905 } else if (opcode == kMirOpNop) { 906 str.append("["); 907 insn.opcode = mir->meta.original_opcode; 908 opcode = mir->meta.original_opcode; 909 nop = true; 910 } 911 912 if (opcode >= kMirOpFirst) { 913 str.append(extended_mir_op_names_[opcode - kMirOpFirst]); 914 } else { 915 dalvik_format = Instruction::FormatOf(insn.opcode); 916 flags = Instruction::FlagsOf(insn.opcode); 917 str.append(Instruction::Name(insn.opcode)); 918 } 919 920 if (opcode == kMirOpPhi) { 921 int* incoming = reinterpret_cast<int*>(insn.vB); 922 str.append(StringPrintf(" %s = (%s", 923 GetSSANameWithConst(ssa_rep->defs[0], true).c_str(), 924 GetSSANameWithConst(ssa_rep->uses[0], true).c_str())); 925 str.append(StringPrintf(":%d", incoming[0])); 926 int i; 927 for (i = 1; i < uses; i++) { 928 str.append(StringPrintf(", %s:%d", 929 GetSSANameWithConst(ssa_rep->uses[i], true).c_str(), 930 incoming[i])); 931 } 932 str.append(")"); 933 } else if ((flags & Instruction::kBranch) != 0) { 934 // For branches, decode the instructions to print out the branch targets. 
935 int offset = 0; 936 switch (dalvik_format) { 937 case Instruction::k21t: 938 str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str())); 939 offset = insn.vB; 940 break; 941 case Instruction::k22t: 942 str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(), 943 GetSSANameWithConst(ssa_rep->uses[1], false).c_str())); 944 offset = insn.vC; 945 break; 946 case Instruction::k10t: 947 case Instruction::k20t: 948 case Instruction::k30t: 949 offset = insn.vA; 950 break; 951 default: 952 LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode; 953 } 954 str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset, 955 offset > 0 ? '+' : '-', offset > 0 ? offset : -offset)); 956 } else { 957 // For invokes-style formats, treat wide regs as a pair of singles 958 bool show_singles = ((dalvik_format == Instruction::k35c) || 959 (dalvik_format == Instruction::k3rc)); 960 if (defs != 0) { 961 str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str())); 962 if (uses != 0) { 963 str.append(", "); 964 } 965 } 966 for (int i = 0; i < uses; i++) { 967 str.append( 968 StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str())); 969 if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) { 970 // For the listing, skip the high sreg. 
971 i++; 972 } 973 if (i != (uses -1)) { 974 str.append(","); 975 } 976 } 977 switch (dalvik_format) { 978 case Instruction::k11n: // Add one immediate from vB 979 case Instruction::k21s: 980 case Instruction::k31i: 981 case Instruction::k21h: 982 str.append(StringPrintf(", #%d", insn.vB)); 983 break; 984 case Instruction::k51l: // Add one wide immediate 985 str.append(StringPrintf(", #%lld", insn.vB_wide)); 986 break; 987 case Instruction::k21c: // One register, one string/type/method index 988 case Instruction::k31c: 989 str.append(StringPrintf(", index #%d", insn.vB)); 990 break; 991 case Instruction::k22c: // Two registers, one string/type/method index 992 str.append(StringPrintf(", index #%d", insn.vC)); 993 break; 994 case Instruction::k22s: // Add one immediate from vC 995 case Instruction::k22b: 996 str.append(StringPrintf(", #%d", insn.vC)); 997 break; 998 default: { 999 // Nothing left to print 1000 } 1001 } 1002 } 1003 if (nop) { 1004 str.append("]--optimized away"); 1005 } 1006 int length = str.length() + 1; 1007 ret = static_cast<char*>(arena_->Alloc(length, ArenaAllocator::kAllocDFInfo)); 1008 strncpy(ret, str.c_str(), length); 1009 return ret; 1010} 1011 1012/* Turn method name into a legal Linux file name */ 1013void MIRGraph::ReplaceSpecialChars(std::string& str) { 1014 static const struct { const char before; const char after; } match[] = { 1015 {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'}, 1016 {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='} 1017 }; 1018 for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) { 1019 std::replace(str.begin(), str.end(), match[i].before, match[i].after); 1020 } 1021} 1022 1023std::string MIRGraph::GetSSAName(int ssa_reg) { 1024 // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to 1025 // the arena. We should be smarter and just place straight into the arena, or compute the 1026 // value more lazily. 
  return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
}

// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
  if (reg_location_ == NULL) {
    // Pre-SSA - just use the standard name
    return GetSSAName(ssa_reg);
  }
  if (IsConst(reg_location_[ssa_reg])) {
    if (!singles_only && reg_location_[ssa_reg].wide) {
      // 64-bit constant: append the full wide value in hex.
      return StringPrintf("v%d_%d#0x%llx", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
                          ConstantValueWide(reg_location_[ssa_reg]));
    } else {
      return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
                          ConstantValue(reg_location_[ssa_reg]));
    }
  } else {
    // Non-constant: same formatting as GetSSAName.
    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
  }
}

// Write a dot-graph-safe name for the block into `name`
// (caller-supplied buffer of at least BLOCK_NAME_LEN bytes).
void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
  switch (bb->block_type) {
    case kEntryBlock:
      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
      break;
    case kExitBlock:
      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
      break;
    case kDalvikByteCode:
      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
      break;
    case kExceptionHandling:
      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
               bb->id);
      break;
    default:
      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
      break;
  }
}

// Look up the shorty (signature summary string) for a method index.
const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
  // FIXME: use current code unit for inline support.
  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
  return cu_->dex_file->GetShorty(method_id.proto_idx_);
}

/* Debug Utility - dump a compilation unit */
void MIRGraph::DumpMIRGraph() {
  BasicBlock* bb;
  // NOTE(review): indexed directly by bb->block_type below — assumes the
  // BBType enum order matches this table; verify against the enum definition.
  const char* block_type_names[] = {
    "Entry Block",
    "Code Block",
    "Exit Block",
    "Exception Handling",
    "Catch Block"
  };

  LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  // NOTE(review): cu_->insns is streamed directly, which appears to log the
  // insns pointer/address rather than a count — confirm intent.
  LOG(INFO) << cu_->insns << " insns";
  LOG(INFO) << GetNumBlocks() << " blocks in total";
  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);

  // One INFO line per block, plus taken/fall-through edges where present.
  while (true) {
    bb = iterator.Next();
    if (bb == NULL) break;
    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
        bb->id,
        block_type_names[bb->block_type],
        bb->start_offset,
        bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
        bb->last_mir_insn ? "" : " empty");
    if (bb->taken) {
      LOG(INFO) << " Taken branch: block " << bb->taken->id
                << "(0x" << std::hex << bb->taken->start_offset << ")";
    }
    if (bb->fall_through) {
      LOG(INFO) << " Fallthrough : block " << bb->fall_through->id
                << " (0x" << std::hex << bb->fall_through->start_offset << ")";
    }
  }
}

/*
 * Build an array of location records for the incoming arguments.
 * Note: one location record per word of arguments, with dummy
 * high-word loc for wide arguments. Also pull up any following
 * MOVE_RESULT and incorporate it into the invoke.
 */
CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
                                   bool is_range) {
  CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
                                                        ArenaAllocator::kAllocMisc));
  MIR* move_result_mir = FindMoveResult(bb, mir);
  if (move_result_mir == NULL) {
    info->result.location = kLocInvalid;
  } else {
    // Fold the MOVE_RESULT into this invoke and nop it out, remembering the
    // original opcode in the MIR's metadata.
    info->result = GetRawDest(move_result_mir);
    move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
  }
  info->num_arg_words = mir->ssa_rep->num_uses;
  // One RegLocation per argument word; no allocation for zero-arg invokes.
  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
      (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, ArenaAllocator::kAllocMisc));
  for (int i = 0; i < info->num_arg_words; i++) {
    info->args[i] = GetRawSrc(mir, i);
  }
  info->opt_flags = mir->optimization_flags;
  info->type = type;
  info->is_range = is_range;
  info->index = mir->dalvikInsn.vB;
  info->offset = mir->offset;
  return info;
}

// Allocate a new basic block.
BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
  BasicBlock* bb = static_cast<BasicBlock*>(arena_->Alloc(sizeof(BasicBlock),
                                                          ArenaAllocator::kAllocBB));
  bb->block_type = block_type;
  bb->id = block_id;
  // TUNING: better estimate of the exit block predecessors?
  bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
                                                             (block_type == kExitBlock) ? 2048 : 2,
                                                             kGrowableArrayPredecessors);
  bb->successor_block_list.block_list_type = kNotUsed;
  block_id_map_.Put(block_id, block_id);
  return bb;
}

}  // namespace art