mir_optimization.cc revision b5c9b4008760c9042061490f22aaff990ed04c9a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "local_value_numbering.h"
#include "dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"

namespace art {

static unsigned int Predecessors(BasicBlock* bb) {
  return bb->predecessors->Size();
}

/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
void MIRGraph::SetConstant(int32_t ssa_reg, int value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = value;
}

void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = Low32Bits(value);
  constant_values_[ssa_reg + 1] = High32Bits(value);
}

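/*
 * A minimal sketch of what the walk below recognizes, e.g.:
 *   const/4 v0, #int 5   // the CONST's def becomes constant 5
 *   move v1, v0          // the move's def inherits constant 5 from uses[0]
 * Arithmetic on constants is not folded yet (see the TODO at the end).
 */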
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    // Skip pass if BB has MIR without SSA representation.
    if (mir->ssa_rep == nullptr) {
      return;
    }

    uint64_t df_attributes = GetDataFlowAttributes(mir);

    MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;

    if (!(df_attributes & DF_HAS_DEFS)) continue;

    /* Handle instructions that set up constants directly */
    if (df_attributes & DF_SETS_CONST) {
      if (df_attributes & DF_DA) {
        int32_t vB = static_cast<int32_t>(d_insn->vB);
        switch (d_insn->opcode) {
          case Instruction::CONST_4:
          case Instruction::CONST_16:
          case Instruction::CONST:
            SetConstant(mir->ssa_rep->defs[0], vB);
            break;
          case Instruction::CONST_HIGH16:
            SetConstant(mir->ssa_rep->defs[0], vB << 16);
            break;
          case Instruction::CONST_WIDE_16:
          case Instruction::CONST_WIDE_32:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
            break;
          case Instruction::CONST_WIDE:
            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
            break;
          case Instruction::CONST_WIDE_HIGH16:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
            break;
          default:
            break;
        }
      }
      /* Handle moves of registers holding constants */
    } else if (df_attributes & DF_IS_MOVE) {
      int i;

      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
      }
      /* Move a register holding a constant to another register */
      if (i == mir->ssa_rep->num_uses) {
        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
        if (df_attributes & DF_A_WIDE) {
          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
        }
      }
    }
  }
  /* TODO: implement code to handle arithmetic operations */
}

/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
  BasicBlock* bb = *p_bb;
  if (mir != NULL) {
    mir = mir->next;
    if (mir == NULL) {
      bb = GetBasicBlock(bb->fall_through);
      if ((bb == NULL) || Predecessors(bb) != 1) {
        mir = NULL;
      } else {
        *p_bb = bb;
        mir = bb->first_mir_insn;
      }
    }
  }
  return mir;
}

/*
 * To be used at an invoke mir.  If the logically next mir node represents
 * a move-result, return it.  Else, return NULL.  If a move-result exists,
 * it is required to immediately follow the invoke with no intervening
 * opcodes or incoming arcs.  However, if the result of the invoke is not
 * used, a move-result may not be present.
 */
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
  BasicBlock* tbb = bb;
  mir = AdvanceMIR(&tbb, mir);
  while (mir != NULL) {
    int opcode = mir->dalvikInsn.opcode;
    if ((opcode == Instruction::MOVE_RESULT) ||
        (opcode == Instruction::MOVE_RESULT_OBJECT) ||
        (opcode == Instruction::MOVE_RESULT_WIDE)) {
      break;
    }
    // Keep going if pseudo op, otherwise terminate.
    if (opcode < kNumPackedOpcodes) {
      mir = NULL;
    } else {
      mir = AdvanceMIR(&tbb, mir);
    }
  }
  return mir;
}

BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return NULL;
  }
  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock));
  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
  if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
      ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
    // Follow simple unconditional branches.
    bb = bb_taken;
  } else {
    // Follow simple fallthrough.
    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
  }
  if (bb == NULL || (Predecessors(bb) != 1)) {
    return NULL;
  }
  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
  return bb;
}

static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (mir->ssa_rep->uses[i] == ssa_name) {
          return mir;
        }
      }
    }
  }
  return NULL;
}

static SelectInstructionKind SelectKind(MIR* mir) {
  switch (mir->dalvikInsn.opcode) {
    case Instruction::MOVE:
    case Instruction::MOVE_OBJECT:
    case Instruction::MOVE_16:
    case Instruction::MOVE_OBJECT_16:
    case Instruction::MOVE_FROM16:
    case Instruction::MOVE_OBJECT_FROM16:
      return kSelectMove;
    case Instruction::CONST:
    case Instruction::CONST_4:
    case Instruction::CONST_16:
      return kSelectConst;
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      return kSelectGoto;
    default:
      return kSelectNone;
  }
}

static constexpr ConditionCode kIfCcZConditionCodes[] = {
    kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
};
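
// kIfCcZConditionCodes is indexed by (opcode - IF_EQZ); the COMPILE_ASSERTs
// below verify it stays in sync with the contiguous IF_EQZ..IF_LEZ range.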

COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
               if_ccz_ccodes_size1);

static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
  return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
}

static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
  return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}

COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);

int MIRGraph::GetSSAUseCount(int s_reg) {
  return raw_use_counts_.Get(s_reg);
}

size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
  if (num_non_special_compiler_temps_ >= max_available_non_special_compiler_temps_) {
    return 0;
  } else {
    return max_available_non_special_compiler_temps_ - num_non_special_compiler_temps_;
  }
}

// FIXME - will probably need to revisit all uses of this, as type not defined.
static const RegLocation temp_loc = {kLocCompilerTemp,
                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
                                     RegStorage(), INVALID_SREG, INVALID_SREG};

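/*
 * Usage sketch: GetNewCompilerTemp(kCompilerTempVR, true) reserves two
 * consecutive v_regs for a wide temp. Temp v_regs grow downward, so the
 * returned v_reg names the low half and v_reg + 1 the high half. Returns
 * nullptr when the non-special temp budget is exhausted.
 */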
CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
  // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
  if (ct_type == kCompilerTempVR) {
    size_t available_temps = GetNumAvailableNonSpecialCompilerTemps();
    if (available_temps == 0 || (available_temps <= 1 && wide)) {
      return nullptr;
    }
  }

  CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
                                                            kArenaAllocRegAlloc));

  // Create the type of temp requested. Special temps need special handling because
  // they have a specific virtual register assignment.
  if (ct_type == kCompilerTempSpecialMethodPtr) {
    DCHECK_EQ(wide, false);
    compiler_temp->v_reg = static_cast<int>(kVRegMethodPtrBaseReg);
    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);

    // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
    method_sreg_ = compiler_temp->s_reg_low;
  } else {
    DCHECK_EQ(ct_type, kCompilerTempVR);

    // The new non-special compiler temp must receive a unique v_reg with a negative value.
    compiler_temp->v_reg =
        static_cast<int>(kVRegNonSpecialTempBaseReg) - num_non_special_compiler_temps_;
    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
    num_non_special_compiler_temps_++;

    if (wide) {
      // Ensure that the two registers are consecutive. Since the virtual registers used for
      // temps grow in a negative fashion, we need the smaller to refer to the low part.
      // Thus, we redefine the v_reg and s_reg_low.
      compiler_temp->v_reg--;
      int ssa_reg_high = compiler_temp->s_reg_low;
      compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
      int ssa_reg_low = compiler_temp->s_reg_low;

      // If needed initialize the register location for the high part.
      // The low part is handled later in this method on a common path.
      if (reg_location_ != nullptr) {
        reg_location_[ssa_reg_high] = temp_loc;
        reg_location_[ssa_reg_high].high_word = 1;
        reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
        reg_location_[ssa_reg_high].wide = true;

        // A new SSA needs new use counts.
        use_counts_.Insert(0);
        raw_use_counts_.Insert(0);
      }

      num_non_special_compiler_temps_++;
    }
  }

  // Have we already allocated the register locations?
  if (reg_location_ != nullptr) {
    int ssa_reg_low = compiler_temp->s_reg_low;
    reg_location_[ssa_reg_low] = temp_loc;
    reg_location_[ssa_reg_low].s_reg_low = ssa_reg_low;
    reg_location_[ssa_reg_low].wide = wide;

    // A new SSA needs new use counts.
    use_counts_.Insert(0);
    raw_use_counts_.Insert(0);
  }

  compiler_temps_.Insert(compiler_temp);
  return compiler_temp;
}

/* Do some MIR-level extended basic block optimizations */
bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return true;
  }
  bool use_lvn = bb->use_lvn;
  UniquePtr<LocalValueNumbering> local_valnum;
  if (use_lvn) {
    local_valnum.reset(LocalValueNumbering::Create(cu_));
  }
  while (bb != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      // TUNING: use the returned value number for CSE.
      if (use_lvn) {
        local_valnum->GetValueNumber(mir);
      }
      // Look for interesting opcodes, skip otherwise.
      Instruction::Code opcode = mir->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::CMPL_FLOAT:
        case Instruction::CMPL_DOUBLE:
        case Instruction::CMPG_FLOAT:
        case Instruction::CMPG_DOUBLE:
        case Instruction::CMP_LONG:
          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
            // Bitcode doesn't allow this optimization.
            break;
          }
          if (mir->next != NULL) {
            MIR* mir_next = mir->next;
            // Make sure result of cmp is used by next insn and nowhere else.
            if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
              mir_next->meta.ccode = ConditionCodeForIfCcZ(mir_next->dalvikInsn.opcode);
              switch (opcode) {
                case Instruction::CMPL_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
                  break;
                case Instruction::CMPL_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
                  break;
                case Instruction::CMPG_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
                  break;
                case Instruction::CMPG_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
                  break;
                case Instruction::CMP_LONG:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
                  break;
                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
              }
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
              mir_next->ssa_rep->num_defs = 0;
              mir->ssa_rep->num_uses = 0;
              mir->ssa_rep->num_defs = 0;
            }
          }
          break;
        case Instruction::GOTO:
        case Instruction::GOTO_16:
        case Instruction::GOTO_32:
        case Instruction::IF_EQ:
        case Instruction::IF_NE:
        case Instruction::IF_LT:
        case Instruction::IF_GE:
        case Instruction::IF_GT:
        case Instruction::IF_LE:
        case Instruction::IF_EQZ:
        case Instruction::IF_NEZ:
        case Instruction::IF_LTZ:
        case Instruction::IF_GEZ:
        case Instruction::IF_GTZ:
        case Instruction::IF_LEZ:
          // If we've got a backwards branch to return, no need to suspend check.
          if ((IsBackedge(bb, bb->taken) && GetBasicBlock(bb->taken)->dominates_return) ||
              (IsBackedge(bb, bb->fall_through) &&
               GetBasicBlock(bb->fall_through)->dominates_return)) {
            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
            if (cu_->verbose) {
              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
                        << mir->offset;
            }
          }
          break;
        default:
          break;
      }
      // Is this the select pattern?
      // TODO: flesh out support for Mips.  NOTE: llvm's select op doesn't quite work here.
      // TUNING: expand to support IF_xx compare & branches
      if (!cu_->compiler->IsPortable() &&
          (cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86 ||
           cu_->instruction_set == kX86_64) &&
          IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
        BasicBlock* ft = GetBasicBlock(bb->fall_through);
        DCHECK(ft != NULL);
        BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
        BasicBlock* ft_tk = GetBasicBlock(ft->taken);

        BasicBlock* tk = GetBasicBlock(bb->taken);
        DCHECK(tk != NULL);
        BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
        BasicBlock* tk_tk = GetBasicBlock(tk->taken);

        /*
         * In the select pattern, the taken edge goes to a block that unconditionally
         * transfers to the rejoin block and the fall_through edge goes to a block that
         * unconditionally falls through to the rejoin block.
         */
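        /*
         * Shape being matched (a sketch; tk_tk == ft_ft is the rejoin block):
         *
         *            [bb: if-ccz]
         *             /        \
         *       [ft: move]  [tk: move; goto]
         *             \        /
         *              [rejoin]
         */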
        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
          /*
           * Okay - we have the basic diamond shape.  At the very least, we can eliminate the
           * suspend check on the taken-taken branch back to the join point.
           */
          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
            tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
          }
          // Are the block bodies something we can handle?
          if ((ft->first_mir_insn == ft->last_mir_insn) &&
              (tk->first_mir_insn != tk->last_mir_insn) &&
              (tk->first_mir_insn->next == tk->last_mir_insn) &&
              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
               (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
            // Almost there.  Are the instructions targeting the same vreg?
            MIR* if_true = tk->first_mir_insn;
            MIR* if_false = ft->first_mir_insn;
            // It's possible that the target of the select isn't used - skip those (rare) cases.
            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
              /*
               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
               * Phi node in the merge block and delete it (while using the SSA name
               * of the merge as the target of the SELECT).  Delete both taken and
               * fallthrough blocks, and set fallthrough to merge block.
               * NOTE: not updating other dataflow info (no longer used at this point).
               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
               */
              mir->meta.ccode = ConditionCodeForIfCcZ(mir->dalvikInsn.opcode);
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
              bool const_form = (SelectKind(if_true) == kSelectConst);
              if (SelectKind(if_true) == kSelectMove) {
                if (IsConst(if_true->ssa_rep->uses[0]) &&
                    IsConst(if_false->ssa_rep->uses[0])) {
                  const_form = true;
                  if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
                  if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
                }
              }
              if (const_form) {
                /*
                 * TODO: If both constants are the same value, then instead of generating
                 * a select, we should simply generate a const bytecode. This should be
                 * considered after inlining which can lead to CFG of this form.
                 */
                // "true" set val in vB
                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
                // "false" set val in vC
                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
              } else {
                DCHECK_EQ(SelectKind(if_true), kSelectMove);
                DCHECK_EQ(SelectKind(if_false), kSelectMove);
                int* src_ssa =
                    static_cast<int*>(arena_->Alloc(sizeof(int) * 3, kArenaAllocDFInfo));
                src_ssa[0] = mir->ssa_rep->uses[0];
                src_ssa[1] = if_true->ssa_rep->uses[0];
                src_ssa[2] = if_false->ssa_rep->uses[0];
                mir->ssa_rep->uses = src_ssa;
                mir->ssa_rep->num_uses = 3;
              }
              mir->ssa_rep->num_defs = 1;
              mir->ssa_rep->defs =
                  static_cast<int*>(arena_->Alloc(sizeof(int) * 1, kArenaAllocDFInfo));
              mir->ssa_rep->fp_def =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, kArenaAllocDFInfo));
              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
              // Match type of uses to def.
              mir->ssa_rep->fp_use =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
                                                   kArenaAllocDFInfo));
              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
              }
              /*
               * There is usually a Phi node in the join block for our two cases.  If the
               * Phi node only contains our two cases as input, we will use the result
               * SSA name of the Phi node as our select result and delete the Phi.  If
               * the Phi node has more than two operands, we will arbitrarily use the SSA
               * name of the "true" path, delete the SSA name of the "false" path from the
               * Phi node (and fix up the incoming arc list).
               */
              if (phi->ssa_rep->num_uses == 2) {
                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              } else {
                int dead_def = if_false->ssa_rep->defs[0];
                int live_def = if_true->ssa_rep->defs[0];
                mir->ssa_rep->defs[0] = live_def;
                BasicBlockId* incoming = phi->meta.phi_incoming;
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == live_def) {
                    incoming[i] = bb->id;
                  }
                }
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == dead_def) {
                    int last_slot = phi->ssa_rep->num_uses - 1;
                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
                    incoming[i] = incoming[last_slot];
                  }
                }
              }
              phi->ssa_rep->num_uses--;
              bb->taken = NullBasicBlockId;
              tk->block_type = kDead;
              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              }
            }
          }
        }
      }
    }
    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
  }

  return true;
}

/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(struct BasicBlock* bb) {
  if (bb->data_flow_info != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (mir->ssa_rep == NULL) {
        continue;
      }
      uint64_t df_attributes = GetDataFlowAttributes(mir);
      if (df_attributes & DF_HAS_NULL_CHKS) {
        checkstats_->null_checks++;
        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
          checkstats_->null_checks_eliminated++;
        }
      }
      if (df_attributes & DF_HAS_RANGE_CHKS) {
        checkstats_->range_checks++;
        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
          checkstats_->range_checks_eliminated++;
        }
      }
    }
  }
}

/* Try to make common case the fallthrough path */
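// Example: a block ending in `if-eqz vA` whose taken edge leads to an
// explicit throw is rewritten to `if-nez vA` with its edges swapped, so
// the throwing path ends up on the fall-through edge.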
bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback.
  if (!bb->explicit_throw) {
    return false;
  }
  BasicBlock* walker = bb;
  while (true) {
    // Check termination conditions.
    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
      break;
    }
    BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
    if (prev->conditional_branch) {
      if (GetBasicBlock(prev->fall_through) == walker) {
        // Already done - return.
        break;
      }
      DCHECK_EQ(walker, GetBasicBlock(prev->taken));
      // Got one.  Flip it and exit.
      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
        default: LOG(FATAL) << "Unexpected opcode " << opcode;
      }
      prev->last_mir_insn->dalvikInsn.opcode = opcode;
      BasicBlockId t_bb = prev->taken;
      prev->taken = prev->fall_through;
      prev->fall_through = t_bb;
      break;
    }
    walker = prev;
  }
  return false;
}

/* Combine any basic blocks terminated by instructions that we now know can't throw */
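// A block ending in kMirOpCheck was split in front of a potentially-throwing
// instruction; the pseudo-op carries that instruction in meta.throw_insn, and
// once its checks are proven removable the split halves can be merged again.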
void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
  // Loop here to allow combining a sequence of blocks.
  while (true) {
    // Check termination conditions.
    if ((bb->first_mir_insn == NULL)
        || (bb->data_flow_info == NULL)
        || (bb->block_type == kExceptionHandling)
        || (bb->block_type == kExitBlock)
        || (bb->block_type == kDead)
        || (bb->taken == NullBasicBlockId)
        || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
        || (bb->successor_block_list_type != kNotUsed)
        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
      break;
    }

    // Test the kMirOpCheck instruction.
    MIR* mir = bb->last_mir_insn;
    // Grab the attributes from the paired opcode.
    MIR* throw_insn = mir->meta.throw_insn;
    uint64_t df_attributes = GetDataFlowAttributes(throw_insn);
    bool can_combine = true;
    if (df_attributes & DF_HAS_NULL_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
    }
    if (df_attributes & DF_HAS_RANGE_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
    }
    if (!can_combine) {
      break;
    }
    // OK - got one.  Combine.
    BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
    DCHECK(!bb_next->catch_entry);
    DCHECK_EQ(Predecessors(bb_next), 1U);
    // Overwrite the kMirOpCheck insn with the paired opcode.
    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
    *bb->last_mir_insn = *throw_insn;
    // Use the successor info from the next block.
    bb->successor_block_list_type = bb_next->successor_block_list_type;
    bb->successor_blocks = bb_next->successor_blocks;
    // Use the ending block linkage from the next block.
    bb->fall_through = bb_next->fall_through;
    GetBasicBlock(bb->taken)->block_type = kDead;  // Kill the unused exception block.
    bb->taken = bb_next->taken;
    // Include the rest of the instructions.
    bb->last_mir_insn = bb_next->last_mir_insn;
    /*
     * If lower-half of pair of blocks to combine contained a return, move the flag
     * to the newly combined block.
     */
    bb->terminated_by_return = bb_next->terminated_by_return;

    /*
     * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
     * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
     */

    // Kill bb_next and remap now-dead id to parent.
    bb_next->block_type = kDead;
    block_id_map_.Overwrite(bb_next->id, bb->id);

    // Now, loop back and see if we can keep going.
  }
}

void MIRGraph::EliminateNullChecksAndInferTypesStart() {
  if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
    if (kIsDebugBuild) {
      AllNodesIterator iter(this);
      for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
        CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
      }
    }

    DCHECK(temp_scoped_alloc_.get() == nullptr);
    temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
    temp_bit_vector_size_ = GetNumSSARegs();
    temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapTempSSARegisterV);
  }
}

/*
 * Eliminate unnecessary null checks for a basic block.  Also, while we're
 * doing an iterative walk, go ahead and perform type and size inference.
 */
bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
  if (bb->data_flow_info == NULL) return false;
  bool infer_changed = false;
  bool do_nce = ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0);

  ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
  if (do_nce) {
    /*
     * Set initial state.  Be conservative with catch
     * blocks and start with no assumptions about null check
     * status (except for "this").
     */
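    // Note: Dalvik "ins" occupy the highest-numbered registers of the frame,
    // so the loop below covers exactly the incoming arguments; for a
    // non-static method the first in is "this", which the runtime guarantees
    // to be non-null on entry.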
    if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
      ssa_regs_to_check->ClearAllBits();
      // Assume all ins are objects.
      for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins;
           in_reg < cu_->num_dalvik_registers; in_reg++) {
        ssa_regs_to_check->SetBit(in_reg);
      }
      if ((cu_->access_flags & kAccStatic) == 0) {
        // If non-static method, mark "this" as non-null.
        int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
        ssa_regs_to_check->ClearBit(this_reg);
      }
    } else if (bb->predecessors->Size() == 1) {
      BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
      // pred_bb must have already been processed at least once.
      DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
      ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
      if (pred_bb->block_type == kDalvikByteCode) {
        // Check to see if predecessor had an explicit null-check.
        MIR* last_insn = pred_bb->last_mir_insn;
        if (last_insn != nullptr) {
          Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
          if (last_opcode == Instruction::IF_EQZ) {
            if (pred_bb->fall_through == bb->id) {
              // On the fall-through edge of an IF_EQZ, the tested register (vA)
              // is known to be non-null.
              ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
            }
          } else if (last_opcode == Instruction::IF_NEZ) {
            if (pred_bb->taken == bb->id) {
              // On the taken edge of an IF_NEZ, the tested register (vA) is
              // known to be non-null.
              ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
            }
          }
        }
      }
    } else {
      // Starting state is union of all incoming arcs.
      GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
      CHECK(pred_bb != NULL);
      while (pred_bb->data_flow_info->ending_check_v == nullptr) {
        pred_bb = GetBasicBlock(iter.Next());
        // At least one predecessor must have been processed before this bb.
        DCHECK(pred_bb != nullptr);
        DCHECK(pred_bb->data_flow_info != nullptr);
      }
      ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
      while (true) {
        pred_bb = GetBasicBlock(iter.Next());
        if (!pred_bb) break;
        DCHECK(pred_bb->data_flow_info != nullptr);
        if (pred_bb->data_flow_info->ending_check_v == nullptr) {
          continue;
        }
        ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
      }
    }
    // At this point, ssa_regs_to_check shows which sregs have an object definition with
    // no intervening uses.
  }

  // Walk through the instructions in the block, updating as necessary.
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (mir->ssa_rep == NULL) {
      continue;
    }

    // Propagate type info.
    infer_changed = InferTypeAndSize(bb, mir, infer_changed);
    if (!do_nce) {
      continue;
    }

    uint64_t df_attributes = GetDataFlowAttributes(mir);

    // Might need a null check?
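    // DF_NULL_CHK_1/2 indicate which use operand is the object reference
    // being checked; by default it is the first use.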
    if (df_attributes & DF_HAS_NULL_CHKS) {
      int src_idx;
      if (df_attributes & DF_NULL_CHK_1) {
        src_idx = 1;
      } else if (df_attributes & DF_NULL_CHK_2) {
        src_idx = 2;
      } else {
        src_idx = 0;
      }
      int src_sreg = mir->ssa_rep->uses[src_idx];
      if (!ssa_regs_to_check->IsBitSet(src_sreg)) {
        // Eliminate the null check.
        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
      } else {
        // Do the null check.
        mir->optimization_flags &= ~MIR_IGNORE_NULL_CHECK;
        // Mark s_reg as null-checked.
        ssa_regs_to_check->ClearBit(src_sreg);
      }
    }

    if ((df_attributes & DF_A_WIDE) ||
        (df_attributes & (DF_REF_A | DF_SETS_CONST | DF_NULL_TRANSFER)) == 0) {
      continue;
    }

    /*
     * First, mark all object definitions as requiring null check.
     * Note: we can't tell if a CONST definition might be used as an object, so treat
     * them all as object definitions.
     */
    if (((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A)) ||
        (df_attributes & DF_SETS_CONST)) {
      ssa_regs_to_check->SetBit(mir->ssa_rep->defs[0]);
    }

    // Now, remove mark from all object definitions we know are non-null.
    if (df_attributes & DF_NON_NULL_DST) {
      // Mark target of NEW* as non-null.
      ssa_regs_to_check->ClearBit(mir->ssa_rep->defs[0]);
    }

    // Mark non-null returns from invoke-style NEW*.
    if (df_attributes & DF_NON_NULL_RET) {
      MIR* next_mir = mir->next;
      // Next should be a MOVE_RESULT_OBJECT.
      if (next_mir &&
          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
        // Mark as null checked.
        ssa_regs_to_check->ClearBit(next_mir->ssa_rep->defs[0]);
      } else {
        if (next_mir) {
          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
        } else if (bb->fall_through != NullBasicBlockId) {
          // Look in next basic block.
          struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
               tmir = tmir->next) {
            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
              continue;
            }
            // First non-pseudo should be MOVE_RESULT_OBJECT.
            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
              // Mark as null checked.
              ssa_regs_to_check->ClearBit(tmir->ssa_rep->defs[0]);
            } else {
              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
            }
            break;
          }
        }
      }
    }

    /*
     * Propagate nullcheck state on register copies (including
     * Phi pseudo copies).  For the latter, nullcheck state is
     * the "or" of all the Phi's operands.
     */
    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
      int tgt_sreg = mir->ssa_rep->defs[0];
      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
          mir->ssa_rep->num_uses;
      bool needs_null_check = false;
      for (int i = 0; i < operands; i++) {
        needs_null_check |= ssa_regs_to_check->IsBitSet(mir->ssa_rep->uses[i]);
      }
      if (needs_null_check) {
        ssa_regs_to_check->SetBit(tgt_sreg);
      } else {
        ssa_regs_to_check->ClearBit(tgt_sreg);
      }
    }
  }

  // Did anything change?
  bool nce_changed = false;
  if (do_nce) {
    if (bb->data_flow_info->ending_check_v == nullptr) {
      DCHECK(temp_scoped_alloc_.get() != nullptr);
      bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
          temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
      nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
      bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
    } else if (!ssa_regs_to_check->SameBitsSet(bb->data_flow_info->ending_check_v)) {
      nce_changed = true;
      bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
    }
  }
  return infer_changed | nce_changed;
}

void MIRGraph::EliminateNullChecksAndInferTypesEnd() {
  if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
    // Clean up temporaries.
    temp_bit_vector_size_ = 0u;
    temp_bit_vector_ = nullptr;
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      if (bb->data_flow_info != nullptr) {
        bb->data_flow_info->ending_check_v = nullptr;
      }
    }
    DCHECK(temp_scoped_alloc_.get() != nullptr);
    temp_scoped_alloc_.reset();
  }
}

bool MIRGraph::EliminateClassInitChecksGate() {
  if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
      !cu_->mir_graph->HasStaticFieldAccess()) {
    return false;
  }

  if (kIsDebugBuild) {
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
    }
  }

  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));

  // Each insn we use here has at least 2 code units, offset/2 will be a unique index.
  const size_t end = (cu_->code_item->insns_size_in_code_units_ + 1u) / 2u;
  temp_insn_data_ = static_cast<uint16_t*>(
      temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
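  // For example, an SGET encoded at code-unit offset 6 is recorded at
  // temp_insn_data_[3]; SGET/SPUT insns are at least 2 code units wide, so
  // offset/2 never collides between two of them.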

  uint32_t unique_class_count = 0u;
  {
    // Get unique_class_count and store indexes in temp_insn_data_ using a map on a nested
    // ScopedArenaAllocator.

    // Embed the map value in the entry to save space.
    struct MapEntry {
      // Map key: the class identified by the declaring dex file and type index.
      const DexFile* declaring_dex_file;
      uint16_t declaring_class_idx;
      // Map value: index into bit vectors of classes requiring initialization checks.
      uint16_t index;
    };
    struct MapEntryComparator {
      bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
        if (lhs.declaring_class_idx != rhs.declaring_class_idx) {
          return lhs.declaring_class_idx < rhs.declaring_class_idx;
        }
        return lhs.declaring_dex_file < rhs.declaring_dex_file;
      }
    };

    typedef std::set<MapEntry, MapEntryComparator, ScopedArenaAllocatorAdapter<MapEntry> >
        ClassToIndexMap;

    ScopedArenaAllocator allocator(&cu_->arena_stack);
    ClassToIndexMap class_to_index_map(MapEntryComparator(), allocator.Adapter());

    // First, find all SGET/SPUTs that may need class initialization checks, record INVOKE_STATICs.
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
        DCHECK(bb->data_flow_info != nullptr);
        if (mir->dalvikInsn.opcode >= Instruction::SGET &&
            mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
          const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
          uint16_t index = 0xffffu;
          if (field_info.IsResolved() && !field_info.IsInitialized()) {
            DCHECK_LT(class_to_index_map.size(), 0xffffu);
            MapEntry entry = {
                field_info.DeclaringDexFile(),
                field_info.DeclaringClassIndex(),
                static_cast<uint16_t>(class_to_index_map.size())
            };
            index = class_to_index_map.insert(entry).first->index;
          }
          // Using offset/2 for index into temp_insn_data_.
          temp_insn_data_[mir->offset / 2u] = index;
        }
      }
    }
    unique_class_count = static_cast<uint32_t>(class_to_index_map.size());
  }

  if (unique_class_count == 0u) {
    // All SGET/SPUTs refer to initialized classes. Nothing to do.
    temp_insn_data_ = nullptr;
    temp_scoped_alloc_.reset();
    return false;
  }

  temp_bit_vector_size_ = unique_class_count;
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
  DCHECK_GT(temp_bit_vector_size_, 0u);
  return true;
}

/*
 * Eliminate unnecessary class initialization checks for a basic block.
 */
bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
  DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
  if (bb->data_flow_info == NULL) {
    return false;
  }

  /*
   * Set initial state.  Be conservative with catch
   * blocks and start with no assumptions about class init check status.
   */
  ArenaBitVector* classes_to_check = temp_bit_vector_;
  DCHECK(classes_to_check != nullptr);
  if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
    classes_to_check->SetInitialBits(temp_bit_vector_size_);
  } else if (bb->predecessors->Size() == 1) {
    BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
    // pred_bb must have already been processed at least once.
    DCHECK(pred_bb != nullptr);
    DCHECK(pred_bb->data_flow_info != nullptr);
    DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
    classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
  } else {
    // Starting state is union of all incoming arcs.
    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
    DCHECK(pred_bb != NULL);
    DCHECK(pred_bb->data_flow_info != NULL);
    while (pred_bb->data_flow_info->ending_check_v == nullptr) {
      pred_bb = GetBasicBlock(iter.Next());
      // At least one predecessor must have been processed before this bb.
      DCHECK(pred_bb != nullptr);
      DCHECK(pred_bb->data_flow_info != nullptr);
    }
    classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
    while (true) {
      pred_bb = GetBasicBlock(iter.Next());
      if (!pred_bb) break;
      DCHECK(pred_bb->data_flow_info != nullptr);
      if (pred_bb->data_flow_info->ending_check_v == nullptr) {
        continue;
      }
      classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
    }
  }
  // At this point, classes_to_check shows which classes need clinit checks.

  // Walk through the instructions in the block, updating as necessary.
  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
    if (mir->dalvikInsn.opcode >= Instruction::SGET &&
        mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
      uint16_t index = temp_insn_data_[mir->offset / 2u];
      if (index != 0xffffu) {
        if (!classes_to_check->IsBitSet(index)) {
          // Eliminate the class init check.
          mir->optimization_flags |= MIR_IGNORE_CLINIT_CHECK;
        } else {
          // Do the class init check.
          mir->optimization_flags &= ~MIR_IGNORE_CLINIT_CHECK;
        }
        // Mark the class as initialized.
        classes_to_check->ClearBit(index);
      }
    }
  }

  // Did anything change?
  bool changed = false;
  if (bb->data_flow_info->ending_check_v == nullptr) {
    DCHECK(temp_scoped_alloc_.get() != nullptr);
    DCHECK(bb->data_flow_info != nullptr);
    bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
    changed = classes_to_check->GetHighestBitSet() != -1;
    bb->data_flow_info->ending_check_v->Copy(classes_to_check);
  } else if (!classes_to_check->Equal(bb->data_flow_info->ending_check_v)) {
    changed = true;
    bb->data_flow_info->ending_check_v->Copy(classes_to_check);
  }
  return changed;
}

void MIRGraph::EliminateClassInitChecksEnd() {
  // Clean up temporaries.
  temp_bit_vector_size_ = 0u;
  temp_bit_vector_ = nullptr;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->data_flow_info != nullptr) {
      bb->data_flow_info->ending_check_v = nullptr;
    }
  }

  DCHECK(temp_insn_data_ != nullptr);
  temp_insn_data_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

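/*
 * Note: temp_bit_vector_ / temp_insn_data_ (set up in InlineCallsStart)
 * memoize the work below per invoke: once an inlined getter/setter's field
 * info has been resolved, later queries for the same invoke reuse the cached
 * ifield_lowering_infos_ index instead of resolving again.
 */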
void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
  uint32_t method_index = invoke->meta.method_lowering_info;
  if (temp_bit_vector_->IsBitSet(method_index)) {
    iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
    DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
    return;
  }

  const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
  MethodReference target = method_info.GetTargetMethod();
  DexCompilationUnit inlined_unit(
      cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
      nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
      0u /* access_flags not used */, nullptr /* verified_method not used */);
  MirIFieldLoweringInfo inlined_field_info(field_idx);
  MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
  DCHECK(inlined_field_info.IsResolved());

  uint32_t field_info_index = ifield_lowering_infos_.Size();
  ifield_lowering_infos_.Insert(inlined_field_info);
  temp_bit_vector_->SetBit(method_index);
  temp_insn_data_[method_index] = field_info_index;
  iget_or_iput->meta.ifield_lowering_info = field_info_index;
}

bool MIRGraph::InlineCallsGate() {
  if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
      method_lowering_infos_.Size() == 0u) {
    return false;
  }
  if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
    // This isn't the Quick compiler.
    return false;
  }
  return true;
}

void MIRGraph::InlineCallsStart() {
  // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
  // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.

  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
  temp_bit_vector_size_ = method_lowering_infos_.Size();
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
  temp_bit_vector_->ClearAllBits();
  temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
      temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
}

void MIRGraph::InlineCalls(BasicBlock* bb) {
  if (bb->block_type != kDalvikByteCode) {
    return;
  }
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
      continue;
    }
    const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
    if (!method_info.FastPath()) {
      continue;
    }
    InvokeType sharp_type = method_info.GetSharpType();
    if ((sharp_type != kDirect) &&
        (sharp_type != kStatic || method_info.NeedsClassInitialization())) {
      continue;
    }
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    MethodReference target = method_info.GetTargetMethod();
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
            ->GenInline(this, bb, mir, target.dex_method_index)) {
      if (cu_->verbose) {
        LOG(INFO) << "In \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
            << "\" @0x" << std::hex << mir->offset
            << " inlined " << method_info.GetInvokeType() << " (" << sharp_type << ") call to \""
            << PrettyMethod(target.dex_method_index, *target.dex_file) << "\"";
      }
    }
  }
}

void MIRGraph::InlineCallsEnd() {
  DCHECK(temp_insn_data_ != nullptr);
  temp_insn_data_ = nullptr;
  DCHECK(temp_bit_vector_ != nullptr);
  temp_bit_vector_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

void MIRGraph::DumpCheckStats() {
  Checkstats* stats =
      static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
  checkstats_ = stats;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    CountChecks(bb);
  }
  if (stats->null_checks > 0) {
    float eliminated = static_cast<float>(stats->null_checks_eliminated);
    float checks = static_cast<float>(stats->null_checks);
    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
              << (eliminated / checks) * 100.0 << "%";
  }
  if (stats->range_checks > 0) {
    float eliminated = static_cast<float>(stats->range_checks_eliminated);
    float checks = static_cast<float>(stats->range_checks);
    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
              << (eliminated / checks) * 100.0 << "%";
  }
}

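/*
 * An extended basic block is a chain of blocks in which every block after the
 * head has exactly one predecessor (see NextDominatedBlock), so the chain can
 * be treated as straight-line code by the extended-BB optimizations.
 */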
bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
  if (bb->visited) return false;
  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock))) {
    // Ignore special blocks.
    bb->visited = true;
    return false;
  }
  // Must be head of extended basic block.
  BasicBlock* start_bb = bb;
  extended_basic_blocks_.push_back(bb->id);
  bool terminated_by_return = false;
  bool do_local_value_numbering = false;
  // Visit blocks strictly dominated by this head.
  while (bb != NULL) {
    bb->visited = true;
    terminated_by_return |= bb->terminated_by_return;
    do_local_value_numbering |= bb->use_lvn;
    bb = NextDominatedBlock(bb);
  }
  if (terminated_by_return || do_local_value_numbering) {
    // Do lvn for all blocks in this extended set.
    bb = start_bb;
    while (bb != NULL) {
      bb->use_lvn = do_local_value_numbering;
      bb->dominates_return = terminated_by_return;
      bb = NextDominatedBlock(bb);
    }
  }
  return false;  // Not iterative - return value will be ignored.
}

void MIRGraph::BasicBlockOptimization() {
  if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
    ClearAllVisitedFlags();
    PreOrderDfsIterator iter2(this);
    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
      BuildExtendedBBList(bb);
    }
    // Perform extended basic block optimizations.
    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
      BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
    }
  } else {
    PreOrderDfsIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
      BasicBlockOpt(bb);
    }
  }
}

}  // namespace art