mir_graph.cc revision 8081d2b8d7a743729557051d0294e040e61c747a
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mir_graph.h"

#include <inttypes.h>
#include <queue>

#include "base/stl_util.h"
#include "compiler_internals.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex/global_value_numbering.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "leb128.h"
#include "pass_driver_me_post_opt.h"
#include "utils/scoped_arena_containers.h"

namespace art {

#define MAX_PATTERN_LEN 5

const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
  "Phi",
  "Copy",
  "FusedCmplFloat",
  "FusedCmpgFloat",
  "FusedCmplDouble",
  "FusedCmpgDouble",
  "FusedCmpLong",
  "Nop",
  "OpNullCheck",
  "OpRangeCheck",
  "OpDivZeroCheck",
  "Check1",
  "Check2",
  "Select",
  "ConstVector",
  "MoveVector",
  "PackedMultiply",
  "PackedAddition",
  "PackedSubtract",
  "PackedShiftLeft",
  "PackedSignedShiftRight",
  "PackedUnsignedShiftRight",
  "PackedAnd",
  "PackedOr",
  "PackedXor",
  "PackedAddReduce",
  "PackedReduce",
  "PackedSet",
  "ReserveVectorRegisters",
  "ReturnVectorRegisters",
};

MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
    : reg_location_(NULL),
      block_id_map_(std::less<unsigned int>(), arena->Adapter()),
      cu_(cu),
      ssa_base_vregs_(NULL),
      ssa_subscripts_(NULL),
      vreg_to_ssa_map_(NULL),
      ssa_last_defs_(NULL),
      is_constant_v_(NULL),
      constant_values_(NULL),
      use_counts_(arena, 256, kGrowableArrayMisc),
      raw_use_counts_(arena, 256, kGrowableArrayMisc),
      num_reachable_blocks_(0),
      max_num_reachable_blocks_(0),
      dfs_order_(NULL),
      dfs_post_order_(NULL),
      dom_post_order_traversal_(NULL),
      topological_order_(nullptr),
      topological_order_loop_ends_(nullptr),
      topological_order_indexes_(nullptr),
      topological_order_loop_head_stack_(nullptr),
      i_dom_list_(NULL),
      def_block_matrix_(NULL),
      temp_scoped_alloc_(),
      temp_insn_data_(nullptr),
      temp_bit_vector_size_(0u),
      temp_bit_vector_(nullptr),
      temp_gvn_(),
      block_list_(arena, 100, kGrowableArrayBlockList),
      try_block_addr_(NULL),
      entry_block_(NULL),
      exit_block_(NULL),
      num_blocks_(0),
      current_code_item_(NULL),
      dex_pc_to_block_map_(arena, 0, kGrowableArrayMisc),
      m_units_(arena->Adapter()),
      method_stack_(arena->Adapter()),
      current_method_(kInvalidEntry),
      current_offset_(kInvalidEntry),
      def_count_(0),
      opcode_count_(NULL),
      num_ssa_regs_(0),
      extended_basic_blocks_(arena->Adapter()),
      method_sreg_(0),
      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
      checkstats_(NULL),
      arena_(arena),
      backward_branches_(0),
      forward_branches_(0),
      compiler_temps_(arena, 6, kGrowableArrayMisc),
      num_non_special_compiler_temps_(0),
      max_available_non_special_compiler_temps_(0),
      punt_to_interpreter_(false),
      merged_df_flags_(0u),
      ifield_lowering_infos_(arena, 0u),
      sfield_lowering_infos_(arena, 0u),
      method_lowering_infos_(arena, 0u),
      gen_suspend_test_list_(arena, 0u) {
  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
  max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
      - std::abs(static_cast<int>(kVRegTempBaseReg));
}

MIRGraph::~MIRGraph() {
  STLDeleteElements(&m_units_);
}

/*
 * Parse an instruction and return its length in code units.
 */
int MIRGraph::ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction) {
  const Instruction* inst = Instruction::At(code_ptr);
  decoded_instruction->opcode = inst->Opcode();
  decoded_instruction->vA = inst->HasVRegA() ? inst->VRegA() : 0;
  decoded_instruction->vB = inst->HasVRegB() ? inst->VRegB() : 0;
  decoded_instruction->vB_wide = inst->HasWideVRegB() ? inst->WideVRegB() : 0;
  decoded_instruction->vC = inst->HasVRegC() ? inst->VRegC() : 0;
  if (inst->HasVarArgs()) {
    inst->GetVarArgs(decoded_instruction->arg);
  }
  return inst->SizeInCodeUnits();
}
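
/*
 * Illustrative example (values made up for this sketch): for the Dex
 * bytecode "if-lt v2, v3, +0x10" (format 22t), ParseInsn() would yield
 * opcode = IF_LT, vA = 2, vB = 3, vC = 0x10 and return 2 code units.
 */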

/* Split an existing block from the specified code offset into two */
BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
  DCHECK_GT(code_offset, orig_block->start_offset);
  MIR* insn = orig_block->first_mir_insn;
  MIR* prev = NULL;
  while (insn) {
    if (insn->offset == code_offset) break;
    prev = insn;
    insn = insn->next;
  }
  if (insn == NULL) {
    LOG(FATAL) << "Break split failed";
  }
  BasicBlock* bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(bottom_block);

  bottom_block->start_offset = code_offset;
  bottom_block->first_mir_insn = insn;
  bottom_block->last_mir_insn = orig_block->last_mir_insn;

  /* If this block was terminated by a return, the flag needs to go with the bottom block */
  bottom_block->terminated_by_return = orig_block->terminated_by_return;
  orig_block->terminated_by_return = false;

  /* Handle the taken path */
  bottom_block->taken = orig_block->taken;
  if (bottom_block->taken != NullBasicBlockId) {
    orig_block->taken = NullBasicBlockId;
    BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
    bb_taken->predecessors->Delete(orig_block->id);
    bb_taken->predecessors->Insert(bottom_block->id);
  }

  /* Handle the fallthrough path */
  bottom_block->fall_through = orig_block->fall_through;
  orig_block->fall_through = bottom_block->id;
  bottom_block->predecessors->Insert(orig_block->id);
  if (bottom_block->fall_through != NullBasicBlockId) {
    BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
    bb_fall_through->predecessors->Delete(orig_block->id);
    bb_fall_through->predecessors->Insert(bottom_block->id);
  }

  /* Handle the successor list */
  if (orig_block->successor_block_list_type != kNotUsed) {
    bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
    bottom_block->successor_blocks = orig_block->successor_blocks;
    orig_block->successor_block_list_type = kNotUsed;
    orig_block->successor_blocks = nullptr;
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
    while (true) {
      SuccessorBlockInfo* successor_block_info = iterator.Next();
      if (successor_block_info == nullptr) break;
      BasicBlock* bb = GetBasicBlock(successor_block_info->block);
      if (bb != nullptr) {
        bb->predecessors->Delete(orig_block->id);
        bb->predecessors->Insert(bottom_block->id);
      }
    }
  }

  orig_block->last_mir_insn = prev;
  prev->next = nullptr;

  /*
   * Update the immediate predecessor block pointer so that outgoing edges
   * can be applied to the proper block.
   */
  if (immed_pred_block_p) {
    DCHECK_EQ(*immed_pred_block_p, orig_block);
    *immed_pred_block_p = bottom_block;
  }

  // Associate dex instructions in the bottom block with the new container.
  DCHECK(insn != nullptr);
  DCHECK(insn != orig_block->first_mir_insn);
  DCHECK(insn == bottom_block->first_mir_insn);
  DCHECK_EQ(insn->offset, bottom_block->start_offset);
  DCHECK(static_cast<int>(insn->dalvikInsn.opcode) == kMirOpCheck ||
         !MIR::DecodedInstruction::IsPseudoMirOp(insn->dalvikInsn.opcode));
  DCHECK_EQ(dex_pc_to_block_map_.Get(insn->offset), orig_block->id);
  MIR* p = insn;
  dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
  while (p != bottom_block->last_mir_insn) {
    p = p->next;
    DCHECK(p != nullptr);
    p->bb = bottom_block->id;
    int opcode = p->dalvikInsn.opcode;
    /*
     * Some messiness here to ensure that we only enter real opcodes and only the
     * first half of a potentially throwing instruction that has been split into
     * CHECK and work portions. Since the 2nd half of a split operation is always
     * the first in a BasicBlock, we can't hit it here.
     */
    if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      DCHECK_EQ(dex_pc_to_block_map_.Get(p->offset), orig_block->id);
      dex_pc_to_block_map_.Put(p->offset, bottom_block->id);
    }
  }

  return bottom_block;
}
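
/*
 * Shape of a split, as a sketch (offsets hypothetical): splitting
 * [0x00 .. 0x18] at 0x10 yields
 *   orig_block [0x00 .. 0x0e] --fall_through--> bottom_block [0x10 .. 0x18]
 * with the taken, fall-through and successor edges, the "terminated by
 * return" flag, and the dex_pc_to_block_map_ entries for the moved
 * instructions all transferred to bottom_block.
 */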

/*
 * Given a code offset, find the block that starts with it. If the offset
 * is in the middle of an existing block, split it into two. If immed_pred_block_p
 * is non-null and is the block being split, update *immed_pred_block_p to
 * point to the bottom block so that outgoing edges can be set up properly
 * (by the caller). Uses a map for fast lookup of the typical cases.
 */
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
                                BasicBlock** immed_pred_block_p) {
  if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
    return NULL;
  }

  int block_id = dex_pc_to_block_map_.Get(code_offset);
  BasicBlock* bb = (block_id == 0) ? NULL : block_list_.Get(block_id);

  if ((bb != NULL) && (bb->start_offset == code_offset)) {
    // Does this containing block start with the desired instruction?
    return bb;
  }

  // No direct hit.
  if (!create) {
    return NULL;
  }

  if (bb != NULL) {
    // The target exists somewhere in an existing block.
    return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : NULL);
  }

  // Create a new block.
  bb = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(bb);
  bb->start_offset = code_offset;
  dex_pc_to_block_map_.Put(bb->start_offset, bb->id);
  return bb;
}

/* Identify code ranges in try blocks and set up the empty catch blocks */
void MIRGraph::ProcessTryCatchBlocks() {
  int tries_size = current_code_item_->tries_size_;
  DexOffset offset;

  if (tries_size == 0) {
    return;
  }

  for (int i = 0; i < tries_size; i++) {
    const DexFile::TryItem* pTry =
        DexFile::GetTryItems(*current_code_item_, i);
    DexOffset start_offset = pTry->start_addr_;
    DexOffset end_offset = start_offset + pTry->insn_count_;
    for (offset = start_offset; offset < end_offset; offset++) {
      try_block_addr_->SetBit(offset);
    }
  }

  // Iterate over each of the handlers to enqueue the empty Catch blocks.
  const byte* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
  for (uint32_t idx = 0; idx < handlers_size; idx++) {
    CatchHandlerIterator iterator(handlers_ptr);
    for (; iterator.HasNext(); iterator.Next()) {
      uint32_t address = iterator.GetHandlerAddress();
      FindBlock(address, false /* split */, true /* create */,
                /* immed_pred_block_p */ NULL);
    }
    handlers_ptr = iterator.EndDataPointer();
  }
}
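
/*
 * For illustration (offsets hypothetical): a try item covering
 * [0x04, 0x0a) sets bits 4..9 of try_block_addr_, so the parser can
 * later test any instruction offset with IsBitSet() to decide whether
 * exception edges must be built for it.
 */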

bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
                                     NarrowDexOffset catch_offset) {
  // Catches for monitor-exit during stack unwinding have the pattern
  //   move-exception (move)* (goto)? monitor-exit throw
  // In the currently generated dex bytecode we see these catching a bytecode range including
  // either its own or an identical monitor-exit, http://b/15745363 . This function checks if
  // it's the case for a given monitor-exit and catch block so that we can ignore it.
  // (We don't want to ignore all monitor-exit catches since one could enclose a synchronized
  // block in a try-block and catch the NPE, Error or Throwable and we should let it through;
  // even though a throwing monitor-exit certainly indicates a bytecode error.)
  const Instruction* monitor_exit = Instruction::At(cu_->code_item->insns_ + monitor_exit_offset);
  DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT);
  int monitor_reg = monitor_exit->VRegA_11x();
  const Instruction* check_insn = Instruction::At(cu_->code_item->insns_ + catch_offset);
  DCHECK(check_insn->Opcode() == Instruction::MOVE_EXCEPTION);
  if (check_insn->VRegA_11x() == monitor_reg) {
    // Unexpected move-exception to the same register. Probably not the pattern we're looking for.
    return false;
  }
  check_insn = check_insn->Next();
  while (true) {
    int dest = -1;
    bool wide = false;
    switch (check_insn->Opcode()) {
      case Instruction::MOVE_WIDE:
        wide = true;
        // Intentional fall-through.
      case Instruction::MOVE_OBJECT:
      case Instruction::MOVE:
        dest = check_insn->VRegA_12x();
        break;

      case Instruction::MOVE_WIDE_FROM16:
        wide = true;
        // Intentional fall-through.
      case Instruction::MOVE_OBJECT_FROM16:
      case Instruction::MOVE_FROM16:
        dest = check_insn->VRegA_22x();
        break;

      case Instruction::MOVE_WIDE_16:
        wide = true;
        // Intentional fall-through.
      case Instruction::MOVE_OBJECT_16:
      case Instruction::MOVE_16:
        dest = check_insn->VRegA_32x();
        break;

      case Instruction::GOTO:
      case Instruction::GOTO_16:
      case Instruction::GOTO_32:
        check_insn = check_insn->RelativeAt(check_insn->GetTargetOffset());
        // Intentional fall-through.
      default:
        return check_insn->Opcode() == Instruction::MONITOR_EXIT &&
            check_insn->VRegA_11x() == monitor_reg;
    }

    if (dest == monitor_reg || (wide && dest + 1 == monitor_reg)) {
      return false;
    }

    check_insn = check_insn->Next();
  }
}
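
/*
 * Concrete instance of the pattern above (registers hypothetical):
 *   move-exception v1
 *   monitor-exit   v0   // same lock register as the covered monitor-exit
 *   throw          v1
 * Any move that overwrites v0 (or its wide pair) before the monitor-exit
 * disqualifies the match and the catch is treated normally.
 */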

/* Process instructions with the kBranch flag */
BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags, const uint16_t* code_ptr,
                                       const uint16_t* code_end) {
  DexOffset target = cur_offset;
  switch (insn->dalvikInsn.opcode) {
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      target += insn->dalvikInsn.vA;
      break;
    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vC;
      break;
    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vB;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
  }
  CountBranch(target);
  BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                      /* immed_pred_block_p */ &cur_block);
  cur_block->taken = taken_block->id;
  taken_block->predecessors->Insert(cur_block->id);

  /* Always terminate the current block for conditional branches */
  if (flags & Instruction::kContinue) {
    /*
     * If the method is processed in sequential order from the beginning,
     * we don't need to specify split for continue blocks. However, this
     * routine can be called by compileLoop, which starts parsing the
     * method from an arbitrary address in the method body.
     */
    BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ true,
                                              /* create */ true,
                                              /* immed_pred_block_p */ &cur_block);
    cur_block->fall_through = fallthrough_block->id;
    fallthrough_block->predecessors->Insert(cur_block->id);
  } else if (code_ptr < code_end) {
    FindBlock(cur_offset + width, /* split */ false, /* create */ true,
              /* immed_pred_block_p */ NULL);
  }
  return cur_block;
}

/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags) {
  const uint16_t* switch_data =
      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
  int size;
  const int* keyTable;
  const int* target_table;
  int i;
  int first_key;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kPackedSwitchSignature));
    size = switch_data[1];
    first_key = switch_data[2] | (switch_data[3] << 16);
    target_table = reinterpret_cast<const int*>(&switch_data[4]);
    keyTable = NULL;        // Make the compiler happy.
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  } else {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kSparseSwitchSignature));
    size = switch_data[1];
    keyTable = reinterpret_cast<const int*>(&switch_data[2]);
    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
    first_key = 0;   // To make the compiler happy.
  }
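
  /*
   * Example payload, for illustration only: a packed-switch over cases
   * 10..12 would decode as
   *   ident = 0x0100, size = 3, first_key = 10, targets = {t0, t1, t2}
   * giving successor keys 10, 11 and 12, while a sparse switch stores an
   * explicit keys[] array alongside targets[].
   */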

  if (cur_block->successor_block_list_type != kNotUsed) {
    LOG(FATAL) << "Successor block list already in use: "
               << static_cast<int>(cur_block->successor_block_list_type);
  }
  cur_block->successor_block_list_type =
      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? kPackedSwitch : kSparseSwitch;
  cur_block->successor_blocks =
      new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);

  for (i = 0; i < size; i++) {
    BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
    SuccessorBlockInfo* successor_block_info =
        static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                       kArenaAllocSuccessor));
    successor_block_info->block = case_block->id;
    successor_block_info->key =
        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
        first_key + i : keyTable[i];
    cur_block->successor_blocks->Insert(successor_block_info);
    case_block->predecessors->Insert(cur_block->id);
  }

  /* Fall-through case */
  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
                                            /* create */ true, /* immed_pred_block_p */ NULL);
  cur_block->fall_through = fallthrough_block->id;
  fallthrough_block->predecessors->Insert(cur_block->id);
  return cur_block;
}

/* Process instructions with the kThrow flag */
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                      int width, int flags, ArenaBitVector* try_block_addr,
                                      const uint16_t* code_ptr, const uint16_t* code_end) {
  bool in_try_block = try_block_addr->IsBitSet(cur_offset);
  bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
  bool build_all_edges =
      (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;

  /* In try block */
  if (in_try_block) {
    CatchHandlerIterator iterator(*current_code_item_, cur_offset);

    if (cur_block->successor_block_list_type != kNotUsed) {
      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      LOG(FATAL) << "Successor block list already in use: "
                 << static_cast<int>(cur_block->successor_block_list_type);
    }

    for (; iterator.HasNext(); iterator.Next()) {
      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split */,
                                          false /* create */, NULL /* immed_pred_block_p */);
      if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
          IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
        // Don't allow monitor-exit to catch its own exception, http://b/15745363 .
        continue;
      }
      if (cur_block->successor_block_list_type == kNotUsed) {
        cur_block->successor_block_list_type = kCatch;
        cur_block->successor_blocks = new (arena_) GrowableArray<SuccessorBlockInfo*>(
            arena_, 2, kGrowableArraySuccessorBlocks);
      }
      catch_block->catch_entry = true;
      if (kIsDebugBuild) {
        catches_.insert(catch_block->start_offset);
      }
      SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
          (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
      successor_block_info->block = catch_block->id;
      successor_block_info->key = iterator.GetHandlerTypeIndex();
      cur_block->successor_blocks->Insert(successor_block_info);
      catch_block->predecessors->Insert(cur_block->id);
    }
    in_try_block = (cur_block->successor_block_list_type != kNotUsed);
  }
  if (!in_try_block && build_all_edges) {
    BasicBlock* eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
    cur_block->taken = eh_block->id;
    block_list_.Insert(eh_block);
    eh_block->start_offset = cur_offset;
    eh_block->predecessors->Insert(cur_block->id);
  }

  if (is_throw) {
    cur_block->explicit_throw = true;
    if (code_ptr < code_end) {
      // Force creation of new block following THROW via side-effect.
      FindBlock(cur_offset + width, /* split */ false, /* create */ true,
                /* immed_pred_block_p */ NULL);
    }
    if (!in_try_block) {
      // Don't split a THROW that can't rethrow - we're done.
      return cur_block;
    }
  }

  if (!build_all_edges) {
    /*
     * Even though there is an exception edge here, control cannot return to this
     * method.  Thus, for the purposes of dataflow analysis and optimization, we can
     * ignore the edge.  Doing this reduces compile time, and increases the scope
     * of the basic-block level optimization pass.
     */
    return cur_block;
  }

  /*
   * Split the potentially-throwing instruction into two parts.
   * The first half will be a pseudo-op that captures the exception
   * edges and terminates the basic block.  It always falls through.
   * Then, create a new basic block that begins with the throwing instruction
   * (minus exceptions).  Note: this new basic block must NOT be entered into
   * the block_map.  If the potentially-throwing instruction is the target of a
   * future branch, we need to find the check pseudo half.  The new
   * basic block containing the work portion of the instruction should
   * only be entered via fallthrough from the block containing the
   * pseudo exception edge MIR.  Note also that this new block is
   * not automatically terminated after the work portion, and may
   * contain following instructions.
   *
   * Note also that the dex_pc_to_block_map_ entry for the potentially
   * throwing instruction will refer to the original basic block.
   */
  BasicBlock* new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  block_list_.Insert(new_block);
  new_block->start_offset = insn->offset;
  cur_block->fall_through = new_block->id;
  new_block->predecessors->Insert(cur_block->id);
  MIR* new_insn = NewMIR();
  *new_insn = *insn;
  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
  // Associate the two halves.
  insn->meta.throw_insn = new_insn;
  new_block->AppendMIR(new_insn);
  return new_block;
}
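
/*
 * Resulting shape for a throwing instruction I inside a try block, as a
 * sketch:
 *   [... kMirOpCheck(I)] --fall_through--> [I (work half) ...]
 *          |
 *          +--exception edges--> catch handler blocks
 * The kMirOpCheck half keeps I's dex pc in dex_pc_to_block_map_; the work
 * half lives in the new block returned to the caller.
 */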

/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                            InvokeType invoke_type, uint16_t class_def_idx,
                            uint32_t method_idx, jobject class_loader, const DexFile& dex_file) {
  current_code_item_ = code_item;
  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
  current_method_ = m_units_.size();
  current_offset_ = 0;
  // TODO: will need to snapshot stack image and use that as the mir context identification.
  m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
                     dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
                     cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
  const uint16_t* code_ptr = current_code_item_->insns_;
  const uint16_t* code_end =
      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;

  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
  // TUNING: use better estimate of basic blocks for following resize.
  block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
  dex_pc_to_block_map_.SetSize(dex_pc_to_block_map_.Size() +
                               current_code_item_->insns_size_in_code_units_);

  // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);

  // If this is the first method, set up default entry and exit blocks.
  if (current_method_ == 0) {
    DCHECK(entry_block_ == NULL);
    DCHECK(exit_block_ == NULL);
    DCHECK_EQ(num_blocks_, 0U);
    // Use id 0 to represent a null block.
    BasicBlock* null_block = NewMemBB(kNullBlock, num_blocks_++);
    DCHECK_EQ(null_block->id, NullBasicBlockId);
    null_block->hidden = true;
    block_list_.Insert(null_block);
    entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
    block_list_.Insert(entry_block_);
    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
    block_list_.Insert(exit_block_);
    // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
    cu_->dex_file = &dex_file;
    cu_->class_def_idx = class_def_idx;
    cu_->method_idx = method_idx;
    cu_->access_flags = access_flags;
    cu_->invoke_type = invoke_type;
    cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
    cu_->num_ins = current_code_item_->ins_size_;
    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
    cu_->num_outs = current_code_item_->outs_size_;
    cu_->num_dalvik_registers = current_code_item_->registers_size_;
    cu_->insns = current_code_item_->insns_;
    cu_->code_item = current_code_item_;
  } else {
    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
    /*
     * Will need to manage storage for ins & outs, push previous state and update
     * insert point.
     */
  }

  /* Current block to record parsed instructions */
  BasicBlock* cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
  DCHECK_EQ(current_offset_, 0U);
  cur_block->start_offset = current_offset_;
  block_list_.Insert(cur_block);
  // TODO: for inlining support, insert at the insert point rather than entry block.
  entry_block_->fall_through = cur_block->id;
  cur_block->predecessors->Insert(entry_block_->id);

  /* Identify code ranges in try blocks and set up the empty catch blocks */
  ProcessTryCatchBlocks();

  uint64_t merged_df_flags = 0u;

  /* Parse all instructions and put them into containing basic blocks */
  while (code_ptr < code_end) {
    MIR* insn = NewMIR();
    insn->offset = current_offset_;
    insn->m_unit_index = current_method_;
    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
    Instruction::Code opcode = insn->dalvikInsn.opcode;
    if (opcode_count_ != NULL) {
      opcode_count_[static_cast<int>(opcode)]++;
    }

    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
    int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);

    uint64_t df_flags = GetDataFlowAttributes(insn);
    merged_df_flags |= df_flags;

    if (df_flags & DF_HAS_DEFS) {
      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
    }

    if (df_flags & DF_LVN) {
      cur_block->use_lvn = true;  // Run local value numbering on this basic block.
    }

    // Check for inline data block signatures.
    if (opcode == Instruction::NOP) {
      // A simple NOP will have a width of 1 at this point, embedded data NOP > 1.
      if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
        // Could be an aligning nop.  If an embedded data NOP follows, treat pair as single unit.
        uint16_t following_raw_instruction = code_ptr[1];
        if ((following_raw_instruction == Instruction::kSparseSwitchSignature) ||
            (following_raw_instruction == Instruction::kPackedSwitchSignature) ||
            (following_raw_instruction == Instruction::kArrayDataSignature)) {
          width += Instruction::At(code_ptr + 1)->SizeInCodeUnits();
        }
      }
      if (width == 1) {
        // It is a simple nop - treat normally.
        cur_block->AppendMIR(insn);
      } else {
        DCHECK(cur_block->fall_through == NullBasicBlockId);
        DCHECK(cur_block->taken == NullBasicBlockId);
        // Unreachable instruction, mark for no continuation.
        flags &= ~Instruction::kContinue;
      }
    } else {
      cur_block->AppendMIR(insn);
    }

    // Associate the starting dex_pc for this opcode with its containing basic block.
    dex_pc_to_block_map_.Put(insn->offset, cur_block->id);

    code_ptr += width;

    if (flags & Instruction::kBranch) {
      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
                                   width, flags, code_ptr, code_end);
    } else if (flags & Instruction::kReturn) {
      cur_block->terminated_by_return = true;
      cur_block->fall_through = exit_block_->id;
      exit_block_->predecessors->Insert(cur_block->id);
      /*
       * Terminate the current block if there are instructions
       * afterwards.
       */
      if (code_ptr < code_end) {
        /*
         * Create a fallthrough block for real instructions
         * (incl. NOP).
         */
        FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
                  /* immed_pred_block_p */ NULL);
      }
    } else if (flags & Instruction::kThrow) {
      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
                                  code_ptr, code_end);
    } else if (flags & Instruction::kSwitch) {
      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
    }
    if (verify_flags & Instruction::kVerifyVarArgRange ||
        verify_flags & Instruction::kVerifyVarArgRangeNonZero) {
      /*
       * The Quick backend's runtime model includes a gap between a method's
       * argument ("in") vregs and the rest of its vregs.  Handling a range instruction
       * which spans the gap is somewhat complicated, and should not happen
       * in normal usage of dx.  Punt to the interpreter.
       */
      int first_reg_in_range = insn->dalvikInsn.vC;
      int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
      if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
        punt_to_interpreter_ = true;
      }
    }
    current_offset_ += width;
    BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */ false,
                                       /* immed_pred_block_p */ NULL);
    if (next_block) {
      /*
       * The next instruction could be the target of a previously parsed
       * forward branch so a block is already created. If the current
       * instruction is not an unconditional branch, connect them through
       * the fall-through link.
       */
      DCHECK(cur_block->fall_through == NullBasicBlockId ||
             GetBasicBlock(cur_block->fall_through) == next_block ||
             GetBasicBlock(cur_block->fall_through) == exit_block_);

      if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
        cur_block->fall_through = next_block->id;
        next_block->predecessors->Insert(cur_block->id);
      }
      cur_block = next_block;
    }
  }
  merged_df_flags_ = merged_df_flags;

  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/1_post_parse_cfg/", true);
  }

  if (cu_->verbose) {
    DumpMIRGraph();
  }
}

void MIRGraph::ShowOpcodeStats() {
  DCHECK(opcode_count_ != NULL);
  LOG(INFO) << "Opcode Count";
  for (int i = 0; i < kNumPackedOpcodes; i++) {
    if (opcode_count_[i] != 0) {
      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
                << " " << opcode_count_[i];
    }
  }
}

uint64_t MIRGraph::GetDataFlowAttributes(Instruction::Code opcode) {
  DCHECK_LT((size_t) opcode,
            (sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0])));
  return oat_data_flow_attributes_[opcode];
}

uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) {
  DCHECK(mir != nullptr);
  Instruction::Code opcode = mir->dalvikInsn.opcode;
  return GetDataFlowAttributes(opcode);
}

// TODO: use a configurable base prefix, and adjust callers to supply pass name.
/* Dump the CFG into a DOT graph */
void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix) {
  FILE* file;
  static AtomicInteger cnt(0);

  // Increment counter to get a unique file number.
  cnt++;

  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
  ReplaceSpecialChars(fname);
  fname = StringPrintf("%s%s%x%s_%d.dot", dir_prefix, fname.c_str(),
                       GetBasicBlock(GetEntryBlock()->fall_through)->start_offset,
                       suffix == nullptr ? "" : suffix,
                       cnt.LoadRelaxed());
  file = fopen(fname.c_str(), "w");
  if (file == NULL) {
    return;
  }
  fprintf(file, "digraph G {\n");

  fprintf(file, "  rankdir=TB\n");

  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
  int idx;

  for (idx = 0; idx < num_blocks; idx++) {
    int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
    BasicBlock* bb = GetBasicBlock(block_idx);
    if (bb == NULL) continue;
    if (bb->block_type == kDead) continue;
    if (bb->hidden) continue;
    if (bb->block_type == kEntryBlock) {
      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kExitBlock) {
      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kDalvikByteCode) {
      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
              bb->start_offset, bb->id);
      const MIR* mir;
      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
              bb->first_mir_insn ? " | " : " ");
      for (mir = bb->first_mir_insn; mir; mir = mir->next) {
        int opcode = mir->dalvikInsn.opcode;
        if (opcode > kMirOpSelect && opcode < kMirOpLast) {
          if (opcode == kMirOpConstVector) {
            fprintf(file, "    {%04x %s %d %d %d %d %d %d\\l}%s\\\n", mir->offset,
                    extended_mir_op_names_[kMirOpConstVector - kMirOpFirst],
                    mir->dalvikInsn.vA,
                    mir->dalvikInsn.vB,
                    mir->dalvikInsn.arg[0],
                    mir->dalvikInsn.arg[1],
                    mir->dalvikInsn.arg[2],
                    mir->dalvikInsn.arg[3],
                    mir->next ? " | " : " ");
          } else {
            fprintf(file, "    {%04x %s %d %d %d\\l}%s\\\n", mir->offset,
                    extended_mir_op_names_[opcode - kMirOpFirst],
                    mir->dalvikInsn.vA,
                    mir->dalvikInsn.vB,
                    mir->dalvikInsn.vC,
                    mir->next ? " | " : " ");
          }
        } else {
          fprintf(file, "    {%04x %s %s %s %s\\l}%s\\\n", mir->offset,
                  mir->ssa_rep ? GetDalvikDisassembly(mir) :
                  !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
                    Instruction::Name(mir->dalvikInsn.opcode) :
                    extended_mir_op_names_[opcode - kMirOpFirst],
                  (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                  (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                  (mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ",
                  mir->next ? " | " : " ");
        }
      }
      fprintf(file, "  }\"];\n\n");
    } else if (bb->block_type == kExceptionHandling) {
      char block_name[BLOCK_NAME_LEN];

      GetBlockName(bb, block_name);
      fprintf(file, "  %s [shape=invhouse];\n", block_name);
    }

    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];

    if (bb->taken != NullBasicBlockId) {
      GetBlockName(bb, block_name1);
      GetBlockName(GetBasicBlock(bb->taken), block_name2);
      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
              block_name1, block_name2);
    }
    if (bb->fall_through != NullBasicBlockId) {
      GetBlockName(bb, block_name1);
      GetBlockName(GetBasicBlock(bb->fall_through), block_name2);
      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
    }

    if (bb->successor_block_list_type != kNotUsed) {
      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
              bb->start_offset, bb->id,
              (bb->successor_block_list_type == kCatch) ? "Mrecord" : "record");
      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
      SuccessorBlockInfo* successor_block_info = iterator.Next();

      int succ_id = 0;
      while (true) {
        if (successor_block_info == NULL) break;

        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
        SuccessorBlockInfo* next_successor_block_info = iterator.Next();

        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                succ_id++,
                successor_block_info->key,
                dest_block->start_offset,
                (next_successor_block_info != NULL) ? " | " : " ");

        successor_block_info = next_successor_block_info;
      }
      fprintf(file, "  }\"];\n\n");

      GetBlockName(bb, block_name1);
      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
              block_name1, bb->start_offset, bb->id);

      // Link the successor pseudo-block with all of its potential targets.
      GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);

      succ_id = 0;
      while (true) {
        SuccessorBlockInfo* successor_block_info = iter.Next();
        if (successor_block_info == NULL) break;

        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);

        GetBlockName(dest_block, block_name2);
        fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
                bb->id, succ_id++, block_name2);
      }
    }
    fprintf(file, "\n");

    if (cu_->verbose) {
      /* Display the dominator tree */
      GetBlockName(bb, block_name1);
      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
              block_name1, block_name1);
      if (bb->i_dom) {
        GetBlockName(GetBasicBlock(bb->i_dom), block_name2);
        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
      }
    }
  }
  fprintf(file, "}\n");
  fclose(file);
}
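
// The .dot files written above can be rendered with Graphviz for
// inspection, e.g. "dot -Tpng <name>.dot -o cfg.png" (viewer invocation
// shown for illustration; it is not part of this file).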

/* Insert an MIR instruction at the end of a basic block. */
void BasicBlock::AppendMIR(MIR* mir) {
  // Insert it after the last MIR.
  InsertMIRListAfter(last_mir_insn, mir, mir);
}

void BasicBlock::AppendMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  // Insert the list after the last MIR.
  InsertMIRListAfter(last_mir_insn, first_list_mir, last_list_mir);
}

void BasicBlock::AppendMIRList(const std::vector<MIR*>& insns) {
  for (std::vector<MIR*>::const_iterator it = insns.begin(); it != insns.end(); it++) {
    MIR* new_mir = *it;

    // Append each MIR as a single-element list.
    InsertMIRListAfter(last_mir_insn, new_mir, new_mir);
  }
}

/* Insert a MIR instruction after the specified MIR. */
void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) {
  InsertMIRListAfter(current_mir, new_mir, new_mir);
}

void BasicBlock::InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir) {
  // If no MIR, we are done.
  if (first_list_mir == nullptr || last_list_mir == nullptr) {
    return;
  }

  // If insert_after is null, assume BB is empty.
  if (insert_after == nullptr) {
    first_mir_insn = first_list_mir;
    last_mir_insn = last_list_mir;
    last_list_mir->next = nullptr;
  } else {
    MIR* after_list = insert_after->next;
    insert_after->next = first_list_mir;
    last_list_mir->next = after_list;
    if (after_list == nullptr) {
      last_mir_insn = last_list_mir;
    }
  }

  // Set this BB to be the basic block of the MIRs.
  MIR* last = last_list_mir->next;
  for (MIR* mir = first_list_mir; mir != last; mir = mir->next) {
    mir->bb = id;
  }
}
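
// Splice sketch: inserting the list [F .. L] after node A in (A -> B)
// yields A -> F -> ... -> L -> B; last_mir_insn is updated only when A
// was the tail (B == nullptr).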

/* Insert an MIR instruction at the head of a basic block. */
void BasicBlock::PrependMIR(MIR* mir) {
  InsertMIRListBefore(first_mir_insn, mir, mir);
}

void BasicBlock::PrependMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  // Insert it before the first MIR.
  InsertMIRListBefore(first_mir_insn, first_list_mir, last_list_mir);
}

void BasicBlock::PrependMIRList(const std::vector<MIR*>& to_add) {
  for (std::vector<MIR*>::const_iterator it = to_add.begin(); it != to_add.end(); it++) {
    MIR* mir = *it;

    InsertMIRListBefore(first_mir_insn, mir, mir);
  }
}

/* Insert a MIR instruction before the specified MIR. */
void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) {
  // Insert as a single element list.
  return InsertMIRListBefore(current_mir, new_mir, new_mir);
}

MIR* BasicBlock::FindPreviousMIR(MIR* mir) {
  MIR* current = first_mir_insn;

  while (current != nullptr) {
    MIR* next = current->next;

    if (next == mir) {
      return current;
    }

    current = next;
  }

  return nullptr;
}

void BasicBlock::InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir) {
  // If no MIR, we are done.
  if (first_list_mir == nullptr || last_list_mir == nullptr) {
    return;
  }

  // If insert_before is null, assume BB is empty.
  if (insert_before == nullptr) {
    first_mir_insn = first_list_mir;
    last_mir_insn = last_list_mir;
    last_list_mir->next = nullptr;
  } else {
    if (first_mir_insn == insert_before) {
      last_list_mir->next = first_mir_insn;
      first_mir_insn = first_list_mir;
    } else {
      // Find the preceding MIR.
      MIR* before_list = FindPreviousMIR(insert_before);
      DCHECK(before_list != nullptr);
      before_list->next = first_list_mir;
      last_list_mir->next = insert_before;
    }
  }

  // Set this BB to be the basic block of the MIRs.
  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
    mir->bb = id;
  }
}

bool BasicBlock::RemoveMIR(MIR* mir) {
  // Remove as a single element list.
  return RemoveMIRList(mir, mir);
}

bool BasicBlock::RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  if (first_list_mir == nullptr) {
    return false;
  }

  // Try to find the MIR.
  MIR* before_list = nullptr;
  MIR* after_list = nullptr;

  // If we are removing from the beginning of the MIR list.
  if (first_mir_insn == first_list_mir) {
    before_list = nullptr;
  } else {
    before_list = FindPreviousMIR(first_list_mir);
    if (before_list == nullptr) {
      // We did not find the mir.
      return false;
    }
  }

  // Clear the BB information on every MIR in the list, including the last one.
  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
    mir->bb = NullBasicBlockId;
  }

  after_list = last_list_mir->next;

  // If there is nothing before the list, after_list is the first_mir.
  if (before_list == nullptr) {
    first_mir_insn = after_list;
  } else {
    before_list->next = after_list;
  }

  // If there is nothing after the list, before_list is last_mir.
  if (after_list == nullptr) {
    last_mir_insn = before_list;
  }

  return true;
}

MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
  MIR* next_mir = nullptr;

  if (current != nullptr) {
    next_mir = current->next;
  }

  if (next_mir == nullptr) {
    // Only look for next MIR that follows unconditionally.
    if ((taken == NullBasicBlockId) && (fall_through != NullBasicBlockId)) {
      next_mir = mir_graph->GetBasicBlock(fall_through)->first_mir_insn;
    }
  }

  return next_mir;
}

char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
  MIR::DecodedInstruction insn = mir->dalvikInsn;
  std::string str;
  int flags = 0;
  int opcode = insn.opcode;
  char* ret;
  bool nop = false;
  SSARepresentation* ssa_rep = mir->ssa_rep;
  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format.
  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;

  // Handle special cases.
  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
    str.append(": ");
    // Recover the original Dex instruction.
    insn = mir->meta.throw_insn->dalvikInsn;
    ssa_rep = mir->meta.throw_insn->ssa_rep;
    defs = ssa_rep->num_defs;
    uses = ssa_rep->num_uses;
    opcode = insn.opcode;
  } else if (opcode == kMirOpNop) {
    str.append("[");
    // Recover the original opcode.
    insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
    opcode = insn.opcode;
    nop = true;
  }

  if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
  } else {
    dalvik_format = Instruction::FormatOf(insn.opcode);
    flags = Instruction::FlagsOf(insn.opcode);
    str.append(Instruction::Name(insn.opcode));
  }

  if (opcode == kMirOpPhi) {
    BasicBlockId* incoming = mir->meta.phi_incoming;
    str.append(StringPrintf(" %s = (%s",
               GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
               GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
    str.append(StringPrintf(":%d", incoming[0]));
    int i;
    for (i = 1; i < uses; i++) {
      str.append(StringPrintf(", %s:%d",
                              GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
                              incoming[i]));
    }
    str.append(")");
  } else if ((flags & Instruction::kBranch) != 0) {
    // For branches, decode the instructions to print out the branch targets.
    int offset = 0;
    switch (dalvik_format) {
      case Instruction::k21t:
        str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
        offset = insn.vB;
        break;
      case Instruction::k22t:
        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
                   GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
        offset = insn.vC;
        break;
      case Instruction::k10t:
      case Instruction::k20t:
      case Instruction::k30t:
        offset = insn.vA;
        break;
      default:
        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
    }
    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
  } else {
    // For invoke-style formats, treat wide regs as a pair of singles.
    bool show_singles = ((dalvik_format == Instruction::k35c) ||
                         (dalvik_format == Instruction::k3rc));
    if (defs != 0) {
      str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str()));
      if (uses != 0) {
        str.append(", ");
      }
    }
    for (int i = 0; i < uses; i++) {
      str.append(
          StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str()));
      if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
        // For the listing, skip the high sreg.
        i++;
      }
      if (i != (uses - 1)) {
        str.append(",");
      }
    }
    switch (dalvik_format) {
      case Instruction::k11n:  // Add one immediate from vB.
      case Instruction::k21s:
      case Instruction::k31i:
      case Instruction::k21h:
        str.append(StringPrintf(", #%d", insn.vB));
        break;
      case Instruction::k51l:  // Add one wide immediate.
        str.append(StringPrintf(", #%" PRId64, insn.vB_wide));
        break;
      case Instruction::k21c:  // One register, one string/type/method index.
      case Instruction::k31c:
        str.append(StringPrintf(", index #%d", insn.vB));
        break;
      case Instruction::k22c:  // Two registers, one string/type/method index.
        str.append(StringPrintf(", index #%d", insn.vC));
        break;
      case Instruction::k22s:  // Add one immediate from vC.
      case Instruction::k22b:
        str.append(StringPrintf(", #%d", insn.vC));
        break;
      default: {
        // Nothing left to print.
      }
    }
  }
  if (nop) {
    str.append("]--optimized away");
  }
  int length = str.length() + 1;
  ret = static_cast<char*>(arena_->Alloc(length, kArenaAllocDFInfo));
  strncpy(ret, str.c_str(), length);
  return ret;
}

/* Turn method name into a legal Linux file name */
void MIRGraph::ReplaceSpecialChars(std::string& str) {
  static const struct { const char before; const char after; } match[] = {
    {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'},
    {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='}
  };
  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
  }
}
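
// For example, PrettyMethod() output such as "void java.lang.Object.wait()"
// becomes "void#java.lang.Object.wait@@" after the replacements above.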
1353
1354std::string MIRGraph::GetSSAName(int ssa_reg) {
1355  // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to
1356  //       the arena. We should be smarter and just place straight into the arena, or compute the
1357  //       value more lazily.
1358  return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
1359}
1360
1361// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
1362std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
1363  if (reg_location_ == NULL) {
1364    // Pre-SSA - just use the standard name.
1365    return GetSSAName(ssa_reg);
1366  }
1367  if (IsConst(reg_location_[ssa_reg])) {
1368    if (!singles_only && reg_location_[ssa_reg].wide) {
1369      return StringPrintf("v%d_%d#0x%" PRIx64, SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
1370                          ConstantValueWide(reg_location_[ssa_reg]));
1371    } else {
1372      return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
1373                          ConstantValue(reg_location_[ssa_reg]));
1374    }
1375  } else {
1376    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
1377  }
1378}
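// For example, if SSA reg v3_1 holds the known 32-bit constant 0x2a, GetSSANameWithConst
// returns "v3_1#0x2a"; a known wide constant prints all 64 bits; a non-constant reg falls
// back to the plain "v3_1" form.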
1379
1380void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
1381  switch (bb->block_type) {
1382    case kEntryBlock:
1383      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
1384      break;
1385    case kExitBlock:
1386      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
1387      break;
1388    case kDalvikByteCode:
1389      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
1390      break;
1391    case kExceptionHandling:
1392      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
1393               bb->id);
1394      break;
1395    default:
1396      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
1397      break;
1398  }
1399}
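// Sample outputs (hypothetical ids/offsets): a Dalvik code block starting at offset 0x14
// with id 7 is named "block0014_7"; entry and exit blocks become "entry_7" and "exit_7".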
1400
1401const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
1402  // TODO: for inlining support, use current code unit.
1403  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
1404  return cu_->dex_file->GetShorty(method_id.proto_idx_);
1405}
1406
1407/* Debug Utility - dump a compilation unit */
1408void MIRGraph::DumpMIRGraph() {
1409  BasicBlock* bb;
1410  const char* block_type_names[] = {
1411    "Null Block",
1412    "Entry Block",
1413    "Code Block",
1414    "Exit Block",
1415    "Exception Handling",
1416    "Catch Block"
1417  };
1418
1419  LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1420  LOG(INFO) << cu_->insns << " insns";
1421  LOG(INFO) << GetNumBlocks() << " blocks in total";
1422  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
1423
1424  while (true) {
1425    bb = iterator.Next();
1426    if (bb == NULL) break;
1427    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
1428        bb->id,
1429        block_type_names[bb->block_type],
1430        bb->start_offset,
1431        bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
1432        bb->last_mir_insn ? "" : " empty");
1433    if (bb->taken != NullBasicBlockId) {
1434      LOG(INFO) << "  Taken branch: block " << bb->taken
1435                << " (0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")";
1436    }
1437    if (bb->fall_through != NullBasicBlockId) {
1438      LOG(INFO) << "  Fallthrough : block " << bb->fall_through
1439                << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")";
1440    }
1441  }
1442}
1443
1444/*
1445 * Build an array of location records for the incoming arguments.
1446 * Note: one location record per word of arguments, with dummy
1447 * high-word loc for wide arguments.  Also pull up any following
1448 * MOVE_RESULT and incorporate it into the invoke.
1449 */
1450CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
1451                                  bool is_range) {
1452  CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
1453                                                        kArenaAllocMisc));
1454  MIR* move_result_mir = FindMoveResult(bb, mir);
1455  if (move_result_mir == NULL) {
1456    info->result.location = kLocInvalid;
1457  } else {
1458    info->result = GetRawDest(move_result_mir);
1459    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
1460  }
1461  info->num_arg_words = mir->ssa_rep->num_uses;
1462  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>(
1463      arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, kArenaAllocMisc));
1464  for (int i = 0; i < info->num_arg_words; i++) {
1465    info->args[i] = GetRawSrc(mir, i);
1466  }
1467  info->opt_flags = mir->optimization_flags;
1468  info->type = type;
1469  info->is_range = is_range;
1470  info->index = mir->dalvikInsn.vB;
1471  info->offset = mir->offset;
1472  info->mir = mir;
1473  return info;
1474}
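// Note: num_arg_words counts SSA uses, so a wide (long/double) argument occupies two
// consecutive entries in info->args, the second being the dummy high-word location
// mentioned in the comment above.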
1475
1476// Allocate a new MIR.
1477MIR* MIRGraph::NewMIR() {
1478  MIR* mir = new (arena_) MIR();
1479  return mir;
1480}
1481
1482// Allocate a new basic block.
1483BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
1484  BasicBlock* bb = new (arena_) BasicBlock();
1485
1486  bb->block_type = block_type;
1487  bb->id = block_id;
1488  // TUNING: better estimate of the exit block predecessors?
1489  bb->predecessors = new (arena_) GrowableArray<BasicBlockId>(arena_,
1490                                                             (block_type == kExitBlock) ? 2048 : 2,
1491                                                             kGrowableArrayPredecessors);
1492  bb->successor_block_list_type = kNotUsed;
1493  block_id_map_.Put(block_id, block_id);
1494  return bb;
1495}
1496
1497void MIRGraph::InitializeConstantPropagation() {
1498  is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
1499  constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(), kArenaAllocDFInfo));
1500}
1501
1502void MIRGraph::InitializeMethodUses() {
1503  // The gate starts by initializing the use counts.
1504  int num_ssa_regs = GetNumSSARegs();
1505  use_counts_.Resize(num_ssa_regs + 32);
1506  raw_use_counts_.Resize(num_ssa_regs + 32);
1507  // Initialize list.
1508  for (int i = 0; i < num_ssa_regs; i++) {
1509    use_counts_.Insert(0);
1510    raw_use_counts_.Insert(0);
1511  }
1512}
1513
1514void MIRGraph::SSATransformationStart() {
1515  DCHECK(temp_scoped_alloc_.get() == nullptr);
1516  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
1517  temp_bit_vector_size_ = cu_->num_dalvik_registers;
1518  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
1519      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
1520
1521  // Update the maximum number of reachable blocks.
1522  max_num_reachable_blocks_ = num_reachable_blocks_;
1523}
1524
1525void MIRGraph::SSATransformationEnd() {
1526  // Verify the dataflow information after the pass.
1527  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
1528    VerifyDataflow();
1529  }
1530
1531  temp_bit_vector_size_ = 0u;
1532  temp_bit_vector_ = nullptr;
1533  DCHECK(temp_scoped_alloc_.get() != nullptr);
1534  temp_scoped_alloc_.reset();
1535}
1536
1537static BasicBlock* SelectTopologicalSortOrderFallBack(
1538    MIRGraph* mir_graph, const ArenaBitVector* current_loop,
1539    const ScopedArenaVector<size_t>* visited_cnt_values, ScopedArenaAllocator* allocator,
1540    ScopedArenaVector<BasicBlockId>* tmp_stack) {
1541  // No true loop head has been found, but there may be true loop heads after the mess we need
1542  // to resolve. To avoid taking one of those, pick the candidate with the highest number of
1543  // reachable unvisited nodes. That candidate will surely be a part of a loop.
1544  BasicBlock* fall_back = nullptr;
1545  size_t fall_back_num_reachable = 0u;
1546  // Reuse the same bit vector for each candidate to mark reachable unvisited blocks.
1547  ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false, kBitMapMisc);
1548  AllNodesIterator iter(mir_graph);
1549  for (BasicBlock* candidate = iter.Next(); candidate != nullptr; candidate = iter.Next()) {
1550    if (candidate->hidden ||                            // Hidden, or
1551        candidate->visited ||                           // already processed, or
1552        (*visited_cnt_values)[candidate->id] == 0u ||   // no processed predecessors, or
1553        (current_loop != nullptr &&                     // outside current loop.
1554         !current_loop->IsBitSet(candidate->id))) {
1555      continue;
1556    }
1557    DCHECK(tmp_stack->empty());
1558    tmp_stack->push_back(candidate->id);
1559    candidate_reachable.ClearAllBits();
1560    size_t num_reachable = 0u;
1561    while (!tmp_stack->empty()) {
1562      BasicBlockId current_id = tmp_stack->back();
1563      tmp_stack->pop_back();
1564      BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
1565      DCHECK(current_bb != nullptr);
1566      ChildBlockIterator child_iter(current_bb, mir_graph);
1567      BasicBlock* child_bb = child_iter.Next();
1568      for ( ; child_bb != nullptr; child_bb = child_iter.Next()) {
1569        DCHECK(!child_bb->hidden);
1570        if (child_bb->visited ||                            // Already processed, or
1571            (current_loop != nullptr &&                     // outside current loop.
1572             !current_loop->IsBitSet(child_bb->id))) {
1573          continue;
1574        }
1575        if (!candidate_reachable.IsBitSet(child_bb->id)) {
1576          candidate_reachable.SetBit(child_bb->id);
1577          tmp_stack->push_back(child_bb->id);
1578          num_reachable += 1u;
1579        }
1580      }
1581    }
1582    if (fall_back_num_reachable < num_reachable) {
1583      fall_back_num_reachable = num_reachable;
1584      fall_back = candidate;
1585    }
1586  }
1587  return fall_back;
1588}
1589
1590// Compute the set of unvisited blocks from which bb_id is reachable through unvisited blocks.
1591static void ComputeUnvisitedReachableFrom(MIRGraph* mir_graph, BasicBlockId bb_id,
1592                                          ArenaBitVector* reachable,
1593                                          ScopedArenaVector<BasicBlockId>* tmp_stack) {
1594  // NOTE: Loop heads are indicated by the "visited" flag.
1595  DCHECK(tmp_stack->empty());
1596  reachable->ClearAllBits();
1597  tmp_stack->push_back(bb_id);
1598  while (!tmp_stack->empty()) {
1599    BasicBlockId current_id = tmp_stack->back();
1600    tmp_stack->pop_back();
1601    BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
1602    DCHECK(current_bb != nullptr);
1603    GrowableArray<BasicBlockId>::Iterator iter(current_bb->predecessors);
1604    BasicBlock* pred_bb = mir_graph->GetBasicBlock(iter.Next());
1605    for ( ; pred_bb != nullptr; pred_bb = mir_graph->GetBasicBlock(iter.Next())) {
1606      if (!pred_bb->visited && !reachable->IsBitSet(pred_bb->id)) {
1607        reachable->SetBit(pred_bb->id);
1608        tmp_stack->push_back(pred_bb->id);
1609      }
1610    }
1611  }
1612}
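// Sketch (hypothetical CFG): if bb_id is loop head B with predecessor edges A -> B and
// D -> B, where D -> B is the back edge, ComputeUnvisitedReachableFrom marks A and D and
// then every other unvisited block that can reach them, stopping at blocks already
// marked "visited" (processed loop heads).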
1613
1614void MIRGraph::ComputeTopologicalSortOrder() {
1615  ScopedArenaAllocator allocator(&cu_->arena_stack);
1616  unsigned int num_blocks = GetNumBlocks();
1617
1618  ScopedArenaQueue<BasicBlock*> q(allocator.Adapter());
1619  ScopedArenaVector<size_t> visited_cnt_values(num_blocks, 0u, allocator.Adapter());
1620  ScopedArenaVector<BasicBlockId> loop_head_stack(allocator.Adapter());
1621  size_t max_nested_loops = 0u;
1622  ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false, kBitMapMisc);
1623  loop_exit_blocks.ClearAllBits();
1624
1625  // Count the number of blocks to process and add the entry block(s).
1626  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
1627  unsigned int num_blocks_to_process = 0u;
1628  for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
1629    if (bb->hidden) {
1630      continue;
1631    }
1632
1633    num_blocks_to_process += 1u;
1634
1635    if (bb->predecessors->Size() == 0u) {
1636      // Add entry block to the queue.
1637      q.push(bb);
1638    }
1639  }
1640
1641  // Create the topological order if need be.
1642  if (topological_order_ == nullptr) {
1643    topological_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, num_blocks);
1644    topological_order_loop_ends_ = new (arena_) GrowableArray<uint16_t>(arena_, num_blocks);
1645    topological_order_indexes_ = new (arena_) GrowableArray<uint16_t>(arena_, num_blocks);
1646  }
1647  topological_order_->Reset();
1648  topological_order_loop_ends_->Reset();
1649  topological_order_indexes_->Reset();
1650  topological_order_loop_ends_->Resize(num_blocks);
1651  topological_order_indexes_->Resize(num_blocks);
1652  for (BasicBlockId i = 0; i != num_blocks; ++i) {
1653    topological_order_loop_ends_->Insert(0u);
1654    topological_order_indexes_->Insert(static_cast<uint16_t>(-1));
1655  }
1656
1657  // Mark all blocks as unvisited.
1658  ClearAllVisitedFlags();
1659
1660  // For each loop head, keep track of the blocks from which it is reachable without going
1661  // through other loop heads. Other loop heads are excluded to detect the heads of nested
1662  // loops. Children in this set go into the loop body; the other children jump over the loop.
1663  ScopedArenaVector<ArenaBitVector*> loop_head_reachable_from(allocator.Adapter());
1664  loop_head_reachable_from.resize(num_blocks, nullptr);
1665  // Reuse the same temp stack whenever calculating a loop_head_reachable_from[loop_head_id].
1666  ScopedArenaVector<BasicBlockId> tmp_stack(allocator.Adapter());
1667
1668  while (num_blocks_to_process != 0u) {
1669    BasicBlock* bb = nullptr;
1670    if (!q.empty()) {
1671      num_blocks_to_process -= 1u;
1672      // Get top.
1673      bb = q.front();
1674      q.pop();
1675      if (bb->visited) {
1676        // Loop head: it was already processed, mark end and copy exit blocks to the queue.
1677        DCHECK(q.empty()) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1678        uint16_t idx = static_cast<uint16_t>(topological_order_->Size());
1679        topological_order_loop_ends_->Put(topological_order_indexes_->Get(bb->id), idx);
1680        DCHECK_EQ(loop_head_stack.back(), bb->id);
1681        loop_head_stack.pop_back();
1682        ArenaBitVector* reachable =
1683            loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
1684        for (BasicBlockId candidate_id : loop_exit_blocks.Indexes()) {
1685          if (reachable == nullptr || reachable->IsBitSet(candidate_id)) {
1686            q.push(GetBasicBlock(candidate_id));
1687            // NOTE: The BitVector::IndexIterator will not check the pointed-to bit again,
1688            // so clearing the bit has no effect on the iterator.
1689            loop_exit_blocks.ClearBit(candidate_id);
1690          }
1691        }
1692        continue;
1693      }
1694    } else {
1695      // Find the new loop head.
1696      AllNodesIterator iter(this);
1697      while (true) {
1698        BasicBlock* candidate = iter.Next();
1699        if (candidate == nullptr) {
1700          // We did not find a true loop head, fall back to a reachable block in any loop.
1701          ArenaBitVector* current_loop =
1702              loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
1703          bb = SelectTopologicalSortOrderFallBack(this, current_loop, &visited_cnt_values,
1704                                                  &allocator, &tmp_stack);
1705          DCHECK(bb != nullptr) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1706          if (kIsDebugBuild && cu_->dex_file != nullptr) {
1707            LOG(INFO) << "Topological sort order: Using fall-back in "
1708                << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " BB #" << bb->id
1709                << " @0x" << std::hex << bb->start_offset
1710                << ", num_blocks = " << std::dec << num_blocks;
1711          }
1712          break;
1713        }
1714        if (candidate->hidden ||                            // Hidden, or
1715            candidate->visited ||                           // already processed, or
1716            visited_cnt_values[candidate->id] == 0u ||      // no processed predecessors, or
1717            (!loop_head_stack.empty() &&                    // outside current loop.
1718             !loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(candidate->id))) {
1719          continue;
1720        }
1721
1722        GrowableArray<BasicBlockId>::Iterator pred_iter(candidate->predecessors);
1723        BasicBlock* pred_bb = GetBasicBlock(pred_iter.Next());
1724        for ( ; pred_bb != nullptr; pred_bb = GetBasicBlock(pred_iter.Next())) {
1725          if (pred_bb != candidate && !pred_bb->visited &&
1726              !pred_bb->dominators->IsBitSet(candidate->id)) {
1727            break;  // Keep non-null pred_bb to indicate failure.
1728          }
1729        }
1730        if (pred_bb == nullptr) {
1731          bb = candidate;
1732          break;
1733        }
1734      }
1735      // Compute blocks from which the loop head is reachable and process those blocks first.
1736      ArenaBitVector* reachable =
1737          new (&allocator) ArenaBitVector(&allocator, num_blocks, false, kBitMapMisc);
1738      loop_head_reachable_from[bb->id] = reachable;
1739      ComputeUnvisitedReachableFrom(this, bb->id, reachable, &tmp_stack);
1740      // Now mark as loop head. (Even if it's only a fall back when we don't find a true loop.)
1741      loop_head_stack.push_back(bb->id);
1742      max_nested_loops = std::max(max_nested_loops, loop_head_stack.size());
1743    }
1744
1745    DCHECK(!bb->hidden);
1746    DCHECK(!bb->visited);
1747    bb->visited = true;
1748
1749    // Now add the basic block.
1750    uint16_t idx = static_cast<uint16_t>(topological_order_->Size());
1751    topological_order_indexes_->Put(bb->id, idx);
1752    topological_order_->Insert(bb->id);
1753
1754    // Update visited_cnt_values for children.
1755    ChildBlockIterator succ_iter(bb, this);
1756    BasicBlock* successor = succ_iter.Next();
1757    for ( ; successor != nullptr; successor = succ_iter.Next()) {
1758      if (successor->hidden) {
1759        continue;
1760      }
1761
1762      // One more predecessor was visited.
1763      visited_cnt_values[successor->id] += 1u;
1764      if (visited_cnt_values[successor->id] == successor->predecessors->Size()) {
1765        if (loop_head_stack.empty() ||
1766            loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(successor->id)) {
1767          q.push(successor);
1768        } else {
1769          DCHECK(!loop_exit_blocks.IsBitSet(successor->id));
1770          loop_exit_blocks.SetBit(successor->id);
1771        }
1772      }
1773    }
1774  }
1775
1776  // Prepare the loop head stack for iteration.
1777  topological_order_loop_head_stack_ =
1778      new (arena_) GrowableArray<std::pair<uint16_t, bool>>(arena_, max_nested_loops);
1779}
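// Worked example (hypothetical CFG): for entry -> A -> B with back edge B -> A and exit
// edge B -> exit, ComputeTopologicalSortOrder emits [entry, A, B, exit]. A is pushed as
// a loop head, B -> exit is parked in loop_exit_blocks while inside the loop, and when
// the loop is closed topological_order_loop_ends_[index(A)] is set to the index of exit,
// one past the last body block.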
1780
1781bool BasicBlock::IsExceptionBlock() const {
1782  return block_type == kExceptionHandling;
1783}
1787
1788bool MIRGraph::HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id) {
1789  BasicBlock* target = GetBasicBlock(target_id);
1790
1791  if (source == nullptr || target == nullptr)
1792    return false;
1793
1794  int idx;
1795  for (idx = static_cast<int>(gen_suspend_test_list_.Size()) - 1; idx >= 0; idx--) {
1796    BasicBlock* bb = gen_suspend_test_list_.Get(idx);
1797    if (bb == source)
1798      return true;  // A suspend test was already generated in this block.
1799    if (source->dominators->IsBitSet(bb->id) && bb->dominators->IsBitSet(target_id))
1800      return true;
1801  }
1802
1803  return false;
1804}
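// Note: target is typically a loop head dominating the back-edge source, so the test
// above accepts any block bb that dominates source and is itself dominated by target:
// every trip around the back edge passes through bb, and bb's suspend test covers
// this branch.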
1805
1806ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
1807    : basic_block_(bb), mir_graph_(mir_graph), visited_fallthrough_(false),
1808      visited_taken_(false), have_successors_(false) {
1809  // Check if we actually do have successors.
1810  if (basic_block_ != nullptr && basic_block_->successor_block_list_type != kNotUsed) {
1811    have_successors_ = true;
1812    successor_iter_.Reset(basic_block_->successor_blocks);
1813  }
1814}
1815
1816BasicBlock* ChildBlockIterator::Next() {
1817  // We check if we have a basic block. If we don't, we cannot get the next child.
1818  if (basic_block_ == nullptr) {
1819    return nullptr;
1820  }
1821
1822  // If we haven't visited fallthrough, return that.
1823  if (!visited_fallthrough_) {
1824    visited_fallthrough_ = true;
1825
1826    BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->fall_through);
1827    if (result != nullptr) {
1828      return result;
1829    }
1830  }
1831
1832  // If we haven't visited taken, return that.
1833  if (!visited_taken_) {
1834    visited_taken_ = true;
1835
1836    BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->taken);
1837    if (result != nullptr) {
1838      return result;
1839    }
1840  }
1841
1842  // We visited both taken and fallthrough. Now check if we have successors we need to visit.
1843  if (have_successors_) {
1844    // Get information about next successor block.
1845    for (SuccessorBlockInfo* successor_block_info = successor_iter_.Next();
1846      successor_block_info != nullptr;
1847      successor_block_info = successor_iter_.Next()) {
1848      // Skip successor blocks that were replaced by the null block.
1849      if (successor_block_info->block != NullBasicBlockId) {
1850        return mir_graph_->GetBasicBlock(successor_block_info->block);
1851      }
1852    }
1853  }
1854
1855  // We do not have anything.
1856  return nullptr;
1857}
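// Typical usage, as in ComputeTopologicalSortOrder above:
//   ChildBlockIterator iter(bb, mir_graph);
//   for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) { ... }
// Children are yielded in the order fall_through, taken, then successor list blocks.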
1858
1859BasicBlock* BasicBlock::Copy(CompilationUnit* c_unit) {
1860  MIRGraph* mir_graph = c_unit->mir_graph.get();
1861  return Copy(mir_graph);
1862}
1863
1864BasicBlock* BasicBlock::Copy(MIRGraph* mir_graph) {
1865  BasicBlock* result_bb = mir_graph->CreateNewBB(block_type);
1866
1867  // We don't do a memcpy-style copy here because it would copy too much state that
1868  // we would then have to clean up by hand anyway. Copy the fields explicitly instead.
1869  // Copy in taken and fallthrough.
1870  result_bb->fall_through = fall_through;
1871  result_bb->taken = taken;
1872
1873  // Copy successor links if needed.
1874  ArenaAllocator* arena = mir_graph->GetArena();
1875
1876  result_bb->successor_block_list_type = successor_block_list_type;
1877  if (result_bb->successor_block_list_type != kNotUsed) {
1878    size_t size = successor_blocks->Size();
1879    result_bb->successor_blocks = new (arena) GrowableArray<SuccessorBlockInfo*>(arena, size, kGrowableArraySuccessorBlocks);
1880    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
1881    while (true) {
1882      SuccessorBlockInfo* sbi_old = iterator.Next();
1883      if (sbi_old == nullptr) {
1884        break;
1885      }
1886      SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>(arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
1887      memcpy(sbi_new, sbi_old, sizeof(SuccessorBlockInfo));
1888      result_bb->successor_blocks->Insert(sbi_new);
1889    }
1890  }
1891
1892  // Copy the start offset.
1893  result_bb->start_offset = start_offset;
1894
1895  // Now copy instructions.
1896  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
1897    // Get a copy first.
1898    MIR* copy = mir->Copy(mir_graph);
1899
1900    // Append it.
1901    result_bb->AppendMIR(copy);
1902  }
1903
1904  return result_bb;
1905}
1906
1907MIR* MIR::Copy(MIRGraph* mir_graph) {
1908  MIR* res = mir_graph->NewMIR();
1909  *res = *this;
1910
1911  // Remove links
1912  res->next = nullptr;
1913  res->bb = NullBasicBlockId;
1914  res->ssa_rep = nullptr;
1915
1916  return res;
1917}
1918
1919MIR* MIR::Copy(CompilationUnit* c_unit) {
1920  return Copy(c_unit->mir_graph.get());
1921}
1922
1923uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
1924  // Default result.
1925  uint32_t res = 0;
1926
1927  // For *PUT instructions, skip the stored value(s) so the remaining uses match the *GET counterparts.
1928  switch (opcode) {
1929    case Instruction::IPUT:
1930    case Instruction::IPUT_OBJECT:
1931    case Instruction::IPUT_BOOLEAN:
1932    case Instruction::IPUT_BYTE:
1933    case Instruction::IPUT_CHAR:
1934    case Instruction::IPUT_SHORT:
1935    case Instruction::IPUT_QUICK:
1936    case Instruction::IPUT_OBJECT_QUICK:
1937    case Instruction::APUT:
1938    case Instruction::APUT_OBJECT:
1939    case Instruction::APUT_BOOLEAN:
1940    case Instruction::APUT_BYTE:
1941    case Instruction::APUT_CHAR:
1942    case Instruction::APUT_SHORT:
1943    case Instruction::SPUT:
1944    case Instruction::SPUT_OBJECT:
1945    case Instruction::SPUT_BOOLEAN:
1946    case Instruction::SPUT_BYTE:
1947    case Instruction::SPUT_CHAR:
1948    case Instruction::SPUT_SHORT:
1949      // Skip the VR containing what to store.
1950      res = 1;
1951      break;
1952    case Instruction::IPUT_WIDE:
1953    case Instruction::IPUT_WIDE_QUICK:
1954    case Instruction::APUT_WIDE:
1955    case Instruction::SPUT_WIDE:
1956      // Skip the two VRs containing what to store.
1957      res = 2;
1958      break;
1959    default:
1960      // Do nothing in the general case.
1961      break;
1962  }
1963
1964  return res;
1965}
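// Example: for IPUT v0, v1, field@CCCC the uses are {v0 (value), v1 (object)}, so
// GetStartUseIndex returns 1 and uses[1] lines up with the object use of the matching
// IGET; IPUT_WIDE stores its value in uses[0] and uses[1], hence the result 2.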
1966
1967/**
1968 * @brief Checks whether the decoded instruction sets a constant and, if so,
1969 * provides more information about the constant being set.
1970 * @param ptr_value Pointer to a 64-bit holder that receives the constant value.
1971 * @param wide Updated by the function to indicate whether the constant is wide.
1972 * @return Returns true if the decoded instruction sets a constant, false
1973 * otherwise.
1974 */
1975bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const {
1976  bool sets_const = true;
1977  int64_t value = vB;
1978
1979  DCHECK(ptr_value != nullptr);
1980  DCHECK(wide != nullptr);
1981
1982  switch (opcode) {
1983    case Instruction::CONST_4:
1984    case Instruction::CONST_16:
1985    case Instruction::CONST:
1986      *wide = false;
1987      value <<= 32;      // Sign-extend the 32-bit value.
1988      value >>= 32;
1989      break;
1990    case Instruction::CONST_HIGH16:
1991      *wide = false;
1992      value <<= 48;      // Sign-extend (vB << 16).
1993      value >>= 32;
1994      break;
1995    case Instruction::CONST_WIDE_16:
1996    case Instruction::CONST_WIDE_32:
1997      *wide = true;
1998      value <<= 32;      // Sign-extend the 32-bit value.
1999      value >>= 32;
2000      break;
2001    case Instruction::CONST_WIDE:
2002      *wide = true;
2003      value = vB_wide;
2004      break;
2005    case Instruction::CONST_WIDE_HIGH16:
2006      *wide = true;
2007      value <<= 48;      // vB provides the high 16 bits of the wide constant.
2008      break;
2009    default:
2010      sets_const = false;
2011      break;
2012  }
2013
2014  if (sets_const) {
2015    *ptr_value = value;
2016  }
2017
2018  return sets_const;
2019}
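// Worked example: CONST_HIGH16 with vB = 0x1234 computes value = 0x1234 << 48 =
// 0x1234000000000000, then >> 32 (arithmetic) = 0x0000000012340000, i.e. the
// sign-extended 32-bit constant 0x12340000, reported with *wide == false.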
2020
2021void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
2022  // Reset flags for all MIRs in bb.
2023  for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
2024    mir->optimization_flags &= (~reset_flags);
2025  }
2026}
2027
2028void BasicBlock::Hide(CompilationUnit* c_unit) {
2029  // First let's make it a dalvik bytecode block so it doesn't have any special meaning.
2030  block_type = kDalvikByteCode;
2031
2032  // Mark it as hidden.
2033  hidden = true;
2034
2035  // Detach it from its MIRs so we don't generate code for them. The detached MIRs are
2036  // also updated so that they no longer reference a parent block.
2037  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
2038    mir->bb = NullBasicBlockId;
2039  }
2040  first_mir_insn = nullptr;
2041  last_mir_insn = nullptr;
2042
2043  GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
2044
2045  MIRGraph* mir_graph = c_unit->mir_graph.get();
2046  while (true) {
2047    BasicBlock* pred_bb = mir_graph->GetBasicBlock(iterator.Next());
2048    if (pred_bb == nullptr) {
2049      break;
2050    }
2051
2052    // Sadly we have to fix up each predecessor's child links by hand here.
2053    pred_bb->ReplaceChild(id, NullBasicBlockId);
2054  }
2055
2056  // Iterate through children of bb we are hiding.
2057  ChildBlockIterator successor_child_iter(this, mir_graph);
2058
2059  for (BasicBlock* child = successor_child_iter.Next(); child != nullptr; child = successor_child_iter.Next()) {
2060    // Remove the hidden block from each child's predecessor list.
2061    child->predecessors->Delete(id);
2062  }
2063}
2064
2065bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
2066  // To determine whether the ssa reg is live out, we scan all the MIRs and remember the
2067  // last SSA number defined for the same dalvik register. At the end, if it differs from
2068  // ssa_reg, then ssa_reg is not live out of this BB.
2069  int dalvik_reg = c_unit->mir_graph->SRegToVReg(ssa_reg);
2070
2071  int last_ssa_reg = -1;
2072
2073  // Walk through the MIRs in order, tracking the last def of the dalvik register.
2074  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
2075    // Get ssa rep.
2076    SSARepresentation* ssa_rep = mir->ssa_rep;
2077
2078    // Go through the defines for this MIR.
2079    for (int i = 0; i < ssa_rep->num_defs; i++) {
2080      DCHECK(ssa_rep->defs != nullptr);
2081
2082      // Get the ssa reg.
2083      int def_ssa_reg = ssa_rep->defs[i];
2084
2085      // Get dalvik reg.
2086      int def_dalvik_reg = c_unit->mir_graph->SRegToVReg(def_ssa_reg);
2087
2088      // Compare dalvik regs.
2089      if (dalvik_reg == def_dalvik_reg) {
2090        // We found a def of the register that we are being asked about.
2091        // Remember it.
2092        last_ssa_reg = def_ssa_reg;
2093      }
2094    }
2095  }
2096
2097  if (last_ssa_reg == -1) {
2098    // If we get to this point, we couldn't find a def of the register the caller asked about.
2099    // Be conservative: if we couldn't find a def in this block, assume the register is
2100    // live out.
2101    return true;
2102  }
2103
2104  // We found a def; ssa_reg is live out only if it is the last def of the vreg.
2105  return (ssa_reg == last_ssa_reg);
2106}
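// Example (hypothetical SSA names): if this block defines dalvik reg v2 twice, first as
// v2_1 and later as v2_3, the scan ends with last_ssa_reg referring to v2_3, so
// IsSSALiveOut returns false for v2_1 (it is shadowed) and true for v2_3.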
2107
2108bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
2109  // We need to check taken, fall_through, and successor_blocks to replace.
2110  bool found = false;
2111  if (taken == old_bb) {
2112    taken = new_bb;
2113    found = true;
2114  }
2115
2116  if (fall_through == old_bb) {
2117    fall_through = new_bb;
2118    found = true;
2119  }
2120
2121  if (successor_block_list_type != kNotUsed) {
2122    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(successor_blocks);
2123    while (true) {
2124      SuccessorBlockInfo* successor_block_info = iterator.Next();
2125      if (successor_block_info == nullptr) {
2126        break;
2127      }
2128      if (successor_block_info->block == old_bb) {
2129        successor_block_info->block = new_bb;
2130        found = true;
2131      }
2132    }
2133  }
2134
2135  return found;
2136}
2137
2138void BasicBlock::UpdatePredecessor(BasicBlockId old_parent, BasicBlockId new_parent) {
2139  GrowableArray<BasicBlockId>::Iterator iterator(predecessors);
2140  bool found = false;
2141
2142  while (true) {
2143    BasicBlockId pred_bb_id = iterator.Next();
2144
2145    if (pred_bb_id == NullBasicBlockId) {
2146      break;
2147    }
2148
2149    if (pred_bb_id == old_parent) {
2150      size_t idx = iterator.GetIndex() - 1;
2151      predecessors->Put(idx, new_parent);
2152      found = true;
2153      break;
2154    }
2155  }
2156
2157  // If not found, add it.
2158  if (!found) {
2159    predecessors->Insert(new_parent);
2160  }
2161}
2162
2163// Create a new basic block whose id is the current value of num_blocks_, which is
2164// post-incremented.
2165BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
2166  BasicBlock* res = NewMemBB(block_type, num_blocks_++);
2167  block_list_.Insert(res);
2168  return res;
2169}
2170
2171void MIRGraph::CalculateBasicBlockInformation() {
2172  PassDriverMEPostOpt driver(cu_);
2173  driver.Launch();
2174}
2175
2176void MIRGraph::InitializeBasicBlockData() {
2177  num_blocks_ = block_list_.Size();
2178}
2179
2180}  // namespace art
2181