mir_graph.h revision 060e6febbe2db5e1d754d2743d6534b217d868fe
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_MIR_GRAPH_H_
#define ART_COMPILER_DEX_MIR_GRAPH_H_

#include <stdint.h>

#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
#include "invoke_type.h"
#include "mir_field_info.h"
#include "mir_method_info.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
#include "reg_storage.h"

namespace art {

enum InstructionAnalysisAttributePos {
  kUninterestingOp = 0,
  kArithmeticOp,
  kFPOp,
  kSingleOp,
  kDoubleOp,
  kIntOp,
  kLongOp,
  kBranchOp,
  kInvokeOp,
  kArrayOp,
  kHeavyweightOp,
  kSimpleConstOp,
  kMoveOp,
  kSwitch
};

#define AN_NONE (1 << kUninterestingOp)
#define AN_MATH (1 << kArithmeticOp)
#define AN_FP (1 << kFPOp)
#define AN_LONG (1 << kLongOp)
#define AN_INT (1 << kIntOp)
#define AN_SINGLE (1 << kSingleOp)
#define AN_DOUBLE (1 << kDoubleOp)
#define AN_FLOATMATH (1 << kFPOp)
#define AN_BRANCH (1 << kBranchOp)
#define AN_INVOKE (1 << kInvokeOp)
#define AN_ARRAYOP (1 << kArrayOp)
#define AN_HEAVYWEIGHT (1 << kHeavyweightOp)
#define AN_SIMPLECONST (1 << kSimpleConstOp)
#define AN_MOVE (1 << kMoveOp)
#define AN_SWITCH (1 << kSwitch)
#define AN_COMPUTATIONAL (AN_MATH | AN_ARRAYOP | AN_MOVE | AN_SIMPLECONST)
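
// Illustrative sketch (invented for this note, not part of the original file): each opcode's
// analysis attributes combine these masks, e.g. an integer add would carry AN_MATH | AN_INT,
// and a pass interested only in "computational" work could test:
//
//   uint32_t attrs = MIRGraph::analysis_attributes_[opcode];  // Table declared further below.
//   bool computational = (attrs & AN_COMPUTATIONAL) != 0;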

enum DataFlowAttributePos {
  kUA = 0,
  kUB,
  kUC,
  kAWide,
  kBWide,
  kCWide,
  kDA,
  kIsMove,
  kSetsConst,
  kFormat35c,
  kFormat3rc,
  kNullCheckSrc0,        // Null check of uses[0].
  kNullCheckSrc1,        // Null check of uses[1].
  kNullCheckSrc2,        // Null check of uses[2].
  kNullCheckOut0,        // Null check of outgoing arg0.
  kDstNonNull,           // May assume dst is non-null.
  kRetNonNull,           // May assume retval is non-null.
  kNullTransferSrc0,     // Object copy src[0] -> dst.
  kNullTransferSrcN,     // Phi null check state transfer.
  kRangeCheckSrc1,       // Range check of uses[1].
  kRangeCheckSrc2,       // Range check of uses[2].
  kRangeCheckSrc3,       // Range check of uses[3].
  kFPA,
  kFPB,
  kFPC,
  kCoreA,
  kCoreB,
  kCoreC,
  kRefA,
  kRefB,
  kRefC,
  kUsesMethodStar,       // Implicit use of Method*.
  kUsesIField,           // Accesses an instance field (IGET/IPUT).
  kUsesSField,           // Accesses a static field (SGET/SPUT).
  kDoLVN,                // Worth computing local value numbers.
};

#define DF_NOP                  UINT64_C(0)
#define DF_UA                   (UINT64_C(1) << kUA)
#define DF_UB                   (UINT64_C(1) << kUB)
#define DF_UC                   (UINT64_C(1) << kUC)
#define DF_A_WIDE               (UINT64_C(1) << kAWide)
#define DF_B_WIDE               (UINT64_C(1) << kBWide)
#define DF_C_WIDE               (UINT64_C(1) << kCWide)
#define DF_DA                   (UINT64_C(1) << kDA)
#define DF_IS_MOVE              (UINT64_C(1) << kIsMove)
#define DF_SETS_CONST           (UINT64_C(1) << kSetsConst)
#define DF_FORMAT_35C           (UINT64_C(1) << kFormat35c)
#define DF_FORMAT_3RC           (UINT64_C(1) << kFormat3rc)
#define DF_NULL_CHK_0           (UINT64_C(1) << kNullCheckSrc0)
#define DF_NULL_CHK_1           (UINT64_C(1) << kNullCheckSrc1)
#define DF_NULL_CHK_2           (UINT64_C(1) << kNullCheckSrc2)
#define DF_NULL_CHK_OUT0        (UINT64_C(1) << kNullCheckOut0)
#define DF_NON_NULL_DST         (UINT64_C(1) << kDstNonNull)
#define DF_NON_NULL_RET         (UINT64_C(1) << kRetNonNull)
#define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
#define DF_RANGE_CHK_1          (UINT64_C(1) << kRangeCheckSrc1)
#define DF_RANGE_CHK_2          (UINT64_C(1) << kRangeCheckSrc2)
#define DF_RANGE_CHK_3          (UINT64_C(1) << kRangeCheckSrc3)
#define DF_FP_A                 (UINT64_C(1) << kFPA)
#define DF_FP_B                 (UINT64_C(1) << kFPB)
#define DF_FP_C                 (UINT64_C(1) << kFPC)
#define DF_CORE_A               (UINT64_C(1) << kCoreA)
#define DF_CORE_B               (UINT64_C(1) << kCoreB)
#define DF_CORE_C               (UINT64_C(1) << kCoreC)
#define DF_REF_A                (UINT64_C(1) << kRefA)
#define DF_REF_B                (UINT64_C(1) << kRefB)
#define DF_REF_C                (UINT64_C(1) << kRefC)
#define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
#define DF_IFIELD               (UINT64_C(1) << kUsesIField)
#define DF_SFIELD               (UINT64_C(1) << kUsesSField)
#define DF_LVN                  (UINT64_C(1) << kDoLVN)

#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)

#define DF_HAS_DEFS             (DF_DA)

#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
                                 DF_NULL_CHK_1 | \
                                 DF_NULL_CHK_2 | \
                                 DF_NULL_CHK_OUT0)

#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
                                 DF_RANGE_CHK_2 | \
                                 DF_RANGE_CHK_3)

#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
                                 DF_HAS_RANGE_CHKS)

#define DF_A_IS_REG             (DF_UA | DF_DA)
#define DF_B_IS_REG             (DF_UB)
#define DF_C_IS_REG             (DF_UC)
#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
#define DF_NULL_TRANSFER        (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)
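
// Sketch of how a pass consumes these attributes (the accessor is declared on MIRGraph below;
// the surrounding logic here is invented):
//
//   uint64_t df_attributes = MIRGraph::GetDataFlowAttributes(mir);
//   if ((df_attributes & DF_HAS_NULL_CHKS) != 0) {
//     // This MIR performs a null check that the elimination pass may prove redundant.
//   }
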
enum OatMethodAttributes {
  kIsLeaf,            // Method is leaf.
  kHasLoop,           // Method contains simple loop.
};

#define METHOD_IS_LEAF          (1 << kIsLeaf)
#define METHOD_HAS_LOOP         (1 << kHasLoop)

// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16

#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
#define INVALID_OFFSET (0xDEADF00FU)

#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
#define MIR_IGNORE_CLINIT_CHECK         (1 << kMIRIgnoreClInitCheck)
#define MIR_INLINED                     (1 << kMIRInlined)
#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
#define MIR_CALLEE                      (1 << kMIRCallee)
#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
#define MIR_DUP                         (1 << kMIRDup)

#define BLOCK_NAME_LEN 80

typedef uint16_t BasicBlockId;
static const BasicBlockId NullBasicBlockId = 0;
static constexpr bool kLeafOptimization = false;

/*
 * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
 * it is useful to have compiler-generated temporary registers and have them treated
 * in the same manner as dx-generated virtual registers.  This struct records the SSA
 * name of compiler-introduced temporaries.
 */
struct CompilerTemp {
  int32_t v_reg;      // Virtual register number for temporary.
  int32_t s_reg_low;  // SSA name for low Dalvik word.
};

enum CompilerTempType {
  kCompilerTempVR,                // A virtual register temporary.
  kCompilerTempSpecialMethodPtr,  // Temporary that keeps track of current method pointer.
};

// When debug option enabled, records effectiveness of null and range check elimination.
struct Checkstats {
  int32_t null_checks;
  int32_t null_checks_eliminated;
  int32_t range_checks;
  int32_t range_checks_eliminated;
};

// Dataflow attributes of a basic block.
struct BasicBlockDataFlow {
  ArenaBitVector* use_v;
  ArenaBitVector* def_v;
  ArenaBitVector* live_in_v;
  ArenaBitVector* phi_v;
  int32_t* vreg_to_ssa_map_exit;
  ArenaBitVector* ending_check_v;  // For null check and class init check elimination.
};

/*
 * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
 * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
 * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses (v2/v3, v4/v5).
 * Following SSA renaming, this is the primary struct used by code generators to locate
 * operand and result registers.  This is a somewhat confusing and unhelpful convention that
 * we may want to revisit in the future.
 *
 * TODO:
 *  1. Add accessors for uses/defs and make data private
 *  2. Change fp_use/fp_def to a bit array (could help memory usage)
 *  3. Combine array storage into internal array and handle it via accessors from 1.
 */
struct SSARepresentation {
  int32_t* uses;
  bool* fp_use;
  int32_t* defs;
  bool* fp_def;
  int16_t num_uses_allocated;
  int16_t num_defs_allocated;
  int16_t num_uses;
  int16_t num_defs;

  static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
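
// Illustrative sketch (invented): a pass can walk a MIR's SSA names through its
// SSARepresentation, e.g. to visit every use:
//
//   for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
//     int32_t s_reg = mir->ssa_rep->uses[i];
//     int v_reg = mir_graph->SRegToVReg(s_reg);  // Map the SSA name back to a Dalvik vreg.
//   }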

/*
 * The Midlevel Intermediate Representation node, which may be largely considered a
 * wrapper around a Dalvik byte code.
 */
struct MIR {
  /*
   * TODO: remove embedded DecodedInstruction to save space, keeping only opcode.  Recover
   * additional fields on as-needed basis.  Question: how to support MIR Pseudo-ops; probably
   * need to carry aux data pointer.
   */
  struct DecodedInstruction {
    uint32_t vA;
    uint32_t vB;
    uint64_t vB_wide;        /* for k51l */
    uint32_t vC;
    uint32_t arg[5];         /* vC/D/E/F/G in invoke or filled-new-array */
    Instruction::Code opcode;

    explicit DecodedInstruction():vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) {
    }

    /*
     * Given a decoded instruction representing a const bytecode, it updates
     * the out arguments with proper values as dictated by the constant bytecode.
     */
    bool GetConstant(int64_t* ptr_value, bool* wide) const;

    bool IsStore() const {
      return ((Instruction::FlagsOf(opcode) & Instruction::kStore) == Instruction::kStore);
    }

    bool IsLoad() const {
      return ((Instruction::FlagsOf(opcode) & Instruction::kLoad) == Instruction::kLoad);
    }

    bool IsConditionalBranch() const {
      return (Instruction::FlagsOf(opcode) == (Instruction::kContinue | Instruction::kBranch));
    }

    /**
     * @brief Is the register C component of the decoded instruction a constant?
     */
    bool IsCFieldOrConstant() const {
      return ((Instruction::FlagsOf(opcode) & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
    }

    /**
     * @brief Is the register B component of the decoded instruction a constant?
     */
    bool IsBFieldOrConstant() const {
      return ((Instruction::FlagsOf(opcode) & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
    }

    bool IsCast() const {
      return ((Instruction::FlagsOf(opcode) & Instruction::kCast) == Instruction::kCast);
    }

    /**
     * @brief Does the instruction clobber memory?
     * @details Clobber means that the instruction may change memory in a way that is not
     *          tracked precisely.  Therefore any assumption about memory aliasing or memory
     *          contents should be discarded when crossing such an instruction.
     */
    bool Clobbers() const {
      return ((Instruction::FlagsOf(opcode) & Instruction::kClobber) == Instruction::kClobber);
    }

    bool IsLinear() const {
      return (Instruction::FlagsOf(opcode) & (Instruction::kAdd | Instruction::kSubtract)) != 0;
    }
  } dalvikInsn;

  NarrowDexOffset offset;         // Offset of the instruction in code units.
  uint16_t optimization_flags;
  int16_t m_unit_index;           // Index of the method this MIR was included from.
  BasicBlockId bb;
  MIR* next;
  SSARepresentation* ssa_rep;
  union {
    // Incoming edges for phi node.
    BasicBlockId* phi_incoming;
    // Establish link from check instruction (kMirOpCheck) to the actual throwing instruction.
    MIR* throw_insn;
    // Branch condition for fused cmp or select.
    ConditionCode ccode;
    // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to limit on
    // the number of code points (64K) and size of IGET/IPUT insn (2), this will never exceed 32K.
    uint32_t ifield_lowering_info;
    // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
    // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
    uint32_t sfield_lowering_info;
    // INVOKE data index, points to MIRGraph::method_lowering_infos_.
    uint32_t method_lowering_info;
  } meta;

  explicit MIR():offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
                 next(nullptr), ssa_rep(nullptr) {
    memset(&meta, 0, sizeof(meta));
  }

  uint32_t GetStartUseIndex() const {
    return SSARepresentation::GetStartUseIndex(dalvikInsn.opcode);
  }

  MIR* Copy(CompilationUnit *c_unit);
  MIR* Copy(MIRGraph* mir_graph);

  static void* operator new(size_t size, ArenaAllocator* arena) {
    return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
  }
  static void operator delete(void* p) {}  // Nop.
};
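
// Illustrative sketch (invented): MIRs are arena-allocated through the placement operator new
// above, and constant payloads can be read back with DecodedInstruction::GetConstant():
//
//   MIR* mir = new (arena) MIR();  // arena is an ArenaAllocator*.
//   mir->dalvikInsn.opcode = Instruction::CONST_4;
//   int64_t value;
//   bool wide;
//   if (mir->dalvikInsn.GetConstant(&value, &wide)) {
//     // mir encodes a constant; value and wide have been filled in.
//   }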

struct SuccessorBlockInfo;

struct BasicBlock {
  BasicBlockId id;
  BasicBlockId dfs_id;
  NarrowDexOffset start_offset;     // Offset in code units.
  BasicBlockId fall_through;
  BasicBlockId taken;
  BasicBlockId i_dom;               // Immediate dominator.
  uint16_t nesting_depth;
  BBType block_type:4;
  BlockListType successor_block_list_type:4;
  bool visited:1;
  bool hidden:1;
  bool catch_entry:1;
  bool explicit_throw:1;
  bool conditional_branch:1;
  bool terminated_by_return:1;  // Block ends with a Dalvik return opcode.
  bool dominates_return:1;      // Is a member of return extended basic block.
  bool use_lvn:1;               // Run local value numbering on this block.
  MIR* first_mir_insn;
  MIR* last_mir_insn;
  BasicBlockDataFlow* data_flow_info;
  ArenaBitVector* dominators;
  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
  ArenaBitVector* dom_frontier;     // Dominance frontier.
  GrowableArray<BasicBlockId>* predecessors;
  GrowableArray<SuccessorBlockInfo*>* successor_blocks;

  void AppendMIR(MIR* mir);
  void AppendMIRList(MIR* first_list_mir, MIR* last_list_mir);
  void AppendMIRList(const std::vector<MIR*>& insns);
  void PrependMIR(MIR* mir);
  void PrependMIRList(MIR* first_list_mir, MIR* last_list_mir);
  void PrependMIRList(const std::vector<MIR*>& to_add);
  void InsertMIRAfter(MIR* current_mir, MIR* new_mir);
  void InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir);
  MIR* FindPreviousMIR(MIR* mir);
  void InsertMIRBefore(MIR* insert_before, MIR* list);
  void InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir);
  bool RemoveMIR(MIR* mir);
  bool RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir);

  BasicBlock* Copy(CompilationUnit* c_unit);
  BasicBlock* Copy(MIRGraph* mir_graph);

  /**
   * @brief Reset the optimization_flags field of each MIR.
   */
  void ResetOptimizationFlags(uint16_t reset_flags);

  /**
   * @brief Hide the BasicBlock.
   * @details Set it to kDalvikByteCode, set hidden to true, remove all MIRs,
   *          remove itself from any predecessor edges, remove itself from any
   *          child's predecessor growable array.
   */
  void Hide(CompilationUnit* c_unit);

  /**
   * @brief Is ssa_reg the last SSA definition of that VR in the block?
   */
  bool IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg);

  /**
   * @brief Replace the edge going to old_bb to now go towards new_bb.
   */
  bool ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb);

  /**
   * @brief Update the predecessor growable array from old_pred to new_pred.
   */
  void UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred);

  /**
   * @brief Used to obtain the next MIR that follows unconditionally.
   * @details A nullptr result does not guarantee that no MIR follows;
   * it only means that no unconditional follower was found.
   * @param mir_graph the MIRGraph.
   * @param current The MIR for which to find an unconditional follower.
   * @return Returns the following MIR if one can be found.
   */
  MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
  bool IsExceptionBlock() const;

  static void* operator new(size_t size, ArenaAllocator* arena) {
    return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
  }
  static void operator delete(void* p) {}  // Nop.
};
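
// Illustrative sketch (invented): passes typically walk and edit a block's MIR list with the
// helpers declared above, e.g. removing the first NOP found:
//
//   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
//     if (mir->dalvikInsn.opcode == Instruction::NOP) {
//       bb->RemoveMIR(mir);  // Unlinks the MIR from the block's instruction list.
//       break;
//     }
//   }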

/*
 * The "blocks" field in "successor_block_list" points to an array of elements with the type
 * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
 * blocks, key is the case value.
 */
struct SuccessorBlockInfo {
  BasicBlockId block;
  int key;
};

/**
 * @class ChildBlockIterator
 * @brief Enable an easy iteration of the children.
 */
class ChildBlockIterator {
 public:
  /**
   * @brief Constructs a child iterator.
   * @param bb The basic block whose children we need to iterate through.
   * @param mir_graph The MIRGraph used to get the basic block during iteration.
   */
  ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph);
  BasicBlock* Next();

 private:
  BasicBlock* basic_block_;
  MIRGraph* mir_graph_;
  bool visited_fallthrough_;
  bool visited_taken_;
  bool have_successors_;
  GrowableArray<SuccessorBlockInfo*>::Iterator successor_iter_;
};
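
// Usage sketch (invented): visit every child (taken, fall-through, successor list) of a block.
//
//   ChildBlockIterator iter(bb, mir_graph);
//   for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
//     // Process child.
//   }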

/*
 * Whereas a SSA name describes a definition of a Dalvik vreg, the RegLocation describes
 * the type of an SSA name (and can also be used by code generators to record where the
 * value is located, i.e. physical register, frame, spill, etc.).  For each SSA name (SReg)
 * there is a RegLocation.
 * A note on SSA names:
 *   o SSA names for Dalvik vRegs v0..vN will be assigned 0..N.  These represent the "vN_0"
 *     names.  Negative SSA names represent special values not present in the Dalvik byte code.
 *     For example, SSA name -1 represents an invalid SSA name, and SSA name -2 represents
 *     the Method pointer.  SSA names < -2 are reserved for future use.
 *   o The vN_0 names for non-argument Dalvik vRegs should in practice never be used (as they
 *     would represent the read of an undefined local variable).  The first definition of the
 *     underlying Dalvik vReg will result in a vN_1 name.
 *
 * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
 * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
 */
struct RegLocation {
  RegLocationType location:3;
  unsigned wide:1;
  unsigned defined:1;   // Do we know the type?
  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
  unsigned fp:1;        // Floating point?
  unsigned core:1;      // Non-floating point?
  unsigned ref:1;       // Something GC cares about.
  unsigned high_word:1;  // High word of pair?
  unsigned home:1;      // Does this represent the home location?
  RegStorage reg;       // Encoded physical registers.
  int16_t s_reg_low;    // SSA name for low Dalvik word.
  int16_t orig_sreg;    // TODO: remove after Bitcode gen complete
                        // and consolidate usage w/ s_reg_low.
};
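
// Illustrative sketch (invented): code generators branch on the type bits of a RegLocation,
// e.g. to pick a register class for a value:
//
//   RegLocation loc = mir_graph->GetRegLocation(s_reg);
//   if (loc.fp) {
//     // Floating-point value; loc.wide distinguishes double from float.
//   } else if (loc.ref) {
//     // Object reference the GC cares about.
//   }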

/*
 * Collection of information describing an invoke, and the destination of
 * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
 * more efficient invoke code generation.
 */
struct CallInfo {
  int num_arg_words;    // Note: word count, not arg count.
  RegLocation* args;    // One for each word of arguments.
  RegLocation result;   // Eventual target of MOVE_RESULT.
  int opt_flags;
  InvokeType type;
  uint32_t dex_idx;
  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
  uintptr_t direct_code;
  uintptr_t direct_method;
  RegLocation target;    // Target of following move_result.
  bool skip_this;
  bool is_range;
  DexOffset offset;      // Offset in code units.
  MIR* mir;
};


const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, RegStorage(), INVALID_SREG,
                             INVALID_SREG};

class MIRGraph {
 public:
  MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
  ~MIRGraph();

  /*
   * Examine the graph to determine whether it's worthwhile to spend the time compiling
   * this method.
   */
  bool SkipCompilation(std::string* skip_message);

  /*
   * Should we skip the compilation of this method based on its name?
   */
  bool SkipCompilationByName(const std::string& methodname);

  /*
   * Parse dex method and add MIR at current insert point.  Returns id (which is
   * actually the index of the method in the m_units_ array).
   */
  void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                    InvokeType invoke_type, uint16_t class_def_idx,
                    uint32_t method_idx, jobject class_loader, const DexFile& dex_file);

  /* Find existing block */
  BasicBlock* FindBlock(DexOffset code_offset) {
    return FindBlock(code_offset, false, false, NULL);
  }

  const uint16_t* GetCurrentInsns() const {
    return current_code_item_->insns_;
  }

  const uint16_t* GetInsns(int m_unit_index) const {
    return m_units_[m_unit_index]->GetCodeItem()->insns_;
  }

  unsigned int GetNumBlocks() const {
    return num_blocks_;
  }

  size_t GetNumDalvikInsns() const {
    return cu_->code_item->insns_size_in_code_units_;
  }

  ArenaBitVector* GetTryBlockAddr() const {
    return try_block_addr_;
  }

  BasicBlock* GetEntryBlock() const {
    return entry_block_;
  }

  BasicBlock* GetExitBlock() const {
    return exit_block_;
  }

  BasicBlock* GetBasicBlock(unsigned int block_id) const {
    return (block_id == NullBasicBlockId) ? NULL : block_list_.Get(block_id);
  }

  size_t GetBasicBlockListCount() const {
    return block_list_.Size();
  }

  GrowableArray<BasicBlock*>* GetBlockList() {
    return &block_list_;
  }

  GrowableArray<BasicBlockId>* GetDfsOrder() {
    return dfs_order_;
  }

  GrowableArray<BasicBlockId>* GetDfsPostOrder() {
    return dfs_post_order_;
  }

  GrowableArray<BasicBlockId>* GetDomPostOrder() {
    return dom_post_order_traversal_;
  }

  int GetDefCount() const {
    return def_count_;
  }

  ArenaAllocator* GetArena() {
    return arena_;
  }

  void EnableOpcodeCounting() {
    opcode_count_ = static_cast<int*>(arena_->Alloc(kNumPackedOpcodes * sizeof(int),
                                                    kArenaAllocMisc));
  }

  void ShowOpcodeStats();

  DexCompilationUnit* GetCurrentDexCompilationUnit() const {
    return m_units_[current_method_];
  }

  /**
   * @brief Dump a CFG into a dot file format.
   * @param dir_prefix the directory the file will be created in.
   * @param all_blocks whether to dump all basic blocks or only the reachable ones.
   * @param suffix optional suffix appended to the filename (default = nullptr).
   */
  void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);

  bool HasFieldAccess() const {
    return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
  }

  bool HasStaticFieldAccess() const {
    return (merged_df_flags_ & DF_SFIELD) != 0u;
  }

  bool HasInvokes() const {
    // NOTE: These formats include the rare filled-new-array/range.
    return (merged_df_flags_ & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0u;
  }

  void DoCacheFieldLoweringInfo();

  const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) const {
    DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
    return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
  }

  const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) const {
    DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
    return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
  }

  void DoCacheMethodLoweringInfo();

  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
    DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.Size());
    return method_lowering_infos_.GetRawStorage()[mir->meta.method_lowering_info];
  }

  void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);

  void InitRegLocations();

  void RemapRegLocations();

  void DumpRegLocTable(RegLocation* table, int count);

  void BasicBlockOptimization();

  GrowableArray<BasicBlockId>* GetTopologicalSortOrder() {
    return topological_order_;
  }

  bool IsConst(int32_t s_reg) const {
    return is_constant_v_->IsBitSet(s_reg);
  }

  bool IsConst(RegLocation loc) const {
    return loc.orig_sreg < 0 ? false : IsConst(loc.orig_sreg);
  }

  int32_t ConstantValue(RegLocation loc) const {
    DCHECK(IsConst(loc));
    return constant_values_[loc.orig_sreg];
  }

  int32_t ConstantValue(int32_t s_reg) const {
    DCHECK(IsConst(s_reg));
    return constant_values_[s_reg];
  }

  int64_t ConstantValueWide(RegLocation loc) const {
    DCHECK(IsConst(loc));
    return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
        Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
  }
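
  // Illustrative note (not in the original source): SetConstantWide() (declared below) stores
  // a 64-bit constant as two 32-bit halves of constant_values_, low word at the SSA name and
  // high word at the following one.  ConstantValueWide() reassembles them, e.g.
  // 0x123456789ABCDEF0 from constant_values_[s] == 0x9ABCDEF0 and
  // constant_values_[s + 1] == 0x12345678.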

  bool IsConstantNullRef(RegLocation loc) const {
    return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
  }

  int GetNumSSARegs() const {
    return num_ssa_regs_;
  }

  void SetNumSSARegs(int new_num) {
     /*
      * TODO: It's theoretically possible to exceed 32767, though any cases which did
      * would be filtered out with current settings.  When orig_sreg field is removed
      * from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
      */
    DCHECK_EQ(new_num, static_cast<int16_t>(new_num));
    num_ssa_regs_ = new_num;
  }

  unsigned int GetNumReachableBlocks() const {
    return num_reachable_blocks_;
  }

  int GetUseCount(int vreg) const {
    return use_counts_.Get(vreg);
  }

  int GetRawUseCount(int vreg) const {
    return raw_use_counts_.Get(vreg);
  }

  int GetSSASubscript(int ssa_reg) const {
    return ssa_subscripts_->Get(ssa_reg);
  }

  RegLocation GetRawSrc(MIR* mir, int num) {
    DCHECK(num < mir->ssa_rep->num_uses);
    RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
    return res;
  }

  RegLocation GetRawDest(MIR* mir) {
    DCHECK_GT(mir->ssa_rep->num_defs, 0);
    RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
    return res;
  }

  RegLocation GetDest(MIR* mir) {
    RegLocation res = GetRawDest(mir);
    DCHECK(!res.wide);
    return res;
  }

  RegLocation GetSrc(MIR* mir, int num) {
    RegLocation res = GetRawSrc(mir, num);
    DCHECK(!res.wide);
    return res;
  }

  RegLocation GetDestWide(MIR* mir) {
    RegLocation res = GetRawDest(mir);
    DCHECK(res.wide);
    return res;
  }

  RegLocation GetSrcWide(MIR* mir, int low) {
    RegLocation res = GetRawSrc(mir, low);
    DCHECK(res.wide);
    return res;
  }
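
  // Usage sketch (invented): for a narrow three-operand op such as ADD_INT, a code generator
  // fetches its operand locations as:
  //
  //   RegLocation rl_dest = GetDest(mir);    // Defined SSA name.
  //   RegLocation rl_src0 = GetSrc(mir, 0);  // First use.
  //   RegLocation rl_src1 = GetSrc(mir, 1);  // Second use.
  //
  // The wide variants take the index of the low word of a 64-bit pair instead.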

  RegLocation GetBadLoc() {
    return bad_loc;
  }

  int GetMethodSReg() const {
    return method_sreg_;
  }

  /**
   * @brief Used to obtain the number of compiler temporaries being used.
   * @return Returns the number of compiler temporaries.
   */
  size_t GetNumUsedCompilerTemps() const {
    size_t total_num_temps = compiler_temps_.Size();
    DCHECK_LE(num_non_special_compiler_temps_, total_num_temps);
    return total_num_temps;
  }

  /**
   * @brief Used to obtain the number of non-special compiler temporaries being used.
   * @return Returns the number of non-special compiler temporaries.
   */
  size_t GetNumNonSpecialCompilerTemps() const {
    return num_non_special_compiler_temps_;
  }

  /**
   * @brief Used to set the total number of available non-special compiler temporaries.
   * @details Fails if more temps are already in use than the requested new maximum.
   * @param new_max The new maximum number of non-special compiler temporaries.
   * @return Returns true if the max was set and false if failed to set.
   */
  bool SetMaxAvailableNonSpecialCompilerTemps(size_t new_max) {
    if (new_max < GetNumNonSpecialCompilerTemps()) {
      return false;
    } else {
      max_available_non_special_compiler_temps_ = new_max;
      return true;
    }
  }

  /**
   * @brief Provides the number of non-special compiler temps available.
   * @details Even if this returns zero, special compiler temps are guaranteed to be available.
   * @return Returns the number of available temps.
   */
  size_t GetNumAvailableNonSpecialCompilerTemps();

  /**
   * @brief Used to obtain an existing compiler temporary.
   * @param index The index of the temporary which must be strictly less than the
   * number of temporaries.
   * @return Returns the temporary that was asked for.
   */
  CompilerTemp* GetCompilerTemp(size_t index) const {
    return compiler_temps_.Get(index);
  }

  /**
   * @brief Used to obtain the maximum number of compiler temporaries that can be requested.
   * @return Returns the maximum number of compiler temporaries, whether used or not.
   */
  size_t GetMaxPossibleCompilerTemps() const {
    return max_available_special_compiler_temps_ + max_available_non_special_compiler_temps_;
  }

  /**
   * @brief Used to obtain a new unique compiler temporary.
   * @param ct_type Type of compiler temporary requested.
   * @param wide Whether we should allocate a wide temporary.
   * @return Returns the newly created compiler temporary.
   */
  CompilerTemp* GetNewCompilerTemp(CompilerTempType ct_type, bool wide);

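  // Usage sketch (invented): an optimization pass needing a scratch virtual register can
  // request one and refer to it through the returned CompilerTemp's fields:
  //
  //   CompilerTemp* temp = GetNewCompilerTemp(kCompilerTempVR, false /* narrow */);
  //   if (temp != nullptr) {
  //     int v_reg = temp->v_reg;  // Usable like any other Dalvik vreg.
  //   }
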
  bool MethodIsLeaf() {
    return attributes_ & METHOD_IS_LEAF;
  }

  RegLocation GetRegLocation(int index) {
    DCHECK((index >= 0) && (index < num_ssa_regs_));
    return reg_location_[index];
  }

  RegLocation GetMethodLoc() {
    return reg_location_[method_sreg_];
  }

  bool IsBackedge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
    return ((target_bb_id != NullBasicBlockId) &&
            (GetBasicBlock(target_bb_id)->start_offset <= branch_bb->start_offset));
  }

  bool IsBackwardsBranch(BasicBlock* branch_bb) {
    return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
  }

  void CountBranch(DexOffset target_offset) {
    if (target_offset <= current_offset_) {
      backward_branches_++;
    } else {
      forward_branches_++;
    }
  }

  int GetBranchCount() {
    return backward_branches_ + forward_branches_;
  }

  static bool IsPseudoMirOp(Instruction::Code opcode) {
    return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
  }

  static bool IsPseudoMirOp(int opcode) {
    return opcode >= static_cast<int>(kMirOpFirst);
  }

  // Is this vreg in the in set?
  bool IsInVReg(int vreg) {
    return (vreg >= cu_->num_regs);
  }

  void DumpCheckStats();
  MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
  int SRegToVReg(int ssa_reg) const;
  void VerifyDataflow();
  void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
  void EliminateNullChecksAndInferTypesStart();
  bool EliminateNullChecksAndInferTypes(BasicBlock* bb);
  void EliminateNullChecksAndInferTypesEnd();
  bool EliminateClassInitChecksGate();
  bool EliminateClassInitChecks(BasicBlock* bb);
  void EliminateClassInitChecksEnd();
  /*
   * Type inference handling helpers.  Because Dalvik's bytecode is not fully typed,
   * we have to do some work to figure out the sreg type.  For some operations it is
   * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
   * may never know the "real" type.
   *
   * We perform the type inference operation by using an iterative walk over
   * the graph, propagating types "defined" by typed opcodes to uses and defs in
   * non-typed opcodes (such as MOVE).  The Setxx(index) helpers are used to set defined
   * types on typed opcodes (such as ADD_INT).  The Setxx(index, is_xx) form is used to
   * propagate types through non-typed opcodes such as PHI and MOVE.  The is_xx flag
   * tells whether our guess of the type is based on a previously typed definition.
   * If so, the defined type takes precedence.  Note that it's possible to have the same sreg
   * show multiple defined types because dx treats constants as untyped bit patterns.
   * The return value of the Setxx() helpers says whether or not the Setxx() action changed
   * the current guess, and is used to know when to terminate the iterative walk.
   * See the illustrative sketch after the declarations below.
   */
  bool SetFp(int index, bool is_fp);
  bool SetFp(int index);
  bool SetCore(int index, bool is_core);
  bool SetCore(int index);
  bool SetRef(int index, bool is_ref);
  bool SetRef(int index);
  bool SetWide(int index, bool is_wide);
  bool SetWide(int index);
  bool SetHigh(int index, bool is_high);
  bool SetHigh(int index);
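
  // Illustrative sketch of the propagation step described above (invented): for a MOVE, push
  // the source's current type guesses onto the destination and record whether anything changed:
  //
  //   int dest = mir->ssa_rep->defs[0];
  //   int src = mir->ssa_rep->uses[0];
  //   bool changed = false;
  //   changed |= SetCore(dest, reg_location_[src].core);
  //   changed |= SetFp(dest, reg_location_[src].fp);
  //   changed |= SetRef(dest, reg_location_[src].ref);
  //   // The walk over the graph repeats until no Setxx() call reports a change.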

  bool PuntToInterpreter() {
    return punt_to_interpreter_;
  }

  void SetPuntToInterpreter(bool val) {
    punt_to_interpreter_ = val;
  }

  char* GetDalvikDisassembly(const MIR* mir);
  void ReplaceSpecialChars(std::string& str);
  std::string GetSSAName(int ssa_reg);
  std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
  void GetBlockName(BasicBlock* bb, char* name);
  const char* GetShortyFromTargetIdx(int);
  void DumpMIRGraph();
  CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
  BasicBlock* NewMemBB(BBType block_type, int block_id);
  MIR* NewMIR();
  MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
  BasicBlock* NextDominatedBlock(BasicBlock* bb);
  bool LayoutBlocks(BasicBlock* bb);
  void ComputeTopologicalSortOrder();
  BasicBlock* CreateNewBB(BBType block_type);

  bool InlineCallsGate();
  void InlineCallsStart();
  void InlineCalls(BasicBlock* bb);
  void InlineCallsEnd();

  /**
   * @brief Perform the initial preparation for the Method Uses.
   */
  void InitializeMethodUses();

  /**
   * @brief Perform the initial preparation for the Constant Propagation.
   */
  void InitializeConstantPropagation();

  /**
   * @brief Perform the initial preparation for the SSA Transformation.
   */
  void SSATransformationStart();

  /**
   * @brief Insert the operands for the Phi nodes.
   * @param bb the considered BasicBlock.
   * @return true
   */
  bool InsertPhiNodeOperands(BasicBlock* bb);

  /**
   * @brief Perform the cleanup after the SSA Transformation.
   */
  void SSATransformationEnd();

  /**
   * @brief Perform constant propagation on a BasicBlock.
   * @param bb the considered BasicBlock.
   */
  void DoConstantPropagation(BasicBlock* bb);

  /**
   * @brief Count the uses in the BasicBlock
   * @param bb the BasicBlock
   */
  void CountUses(struct BasicBlock* bb);

  static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
  static uint64_t GetDataFlowAttributes(MIR* mir);

  /**
   * @brief Combine BasicBlocks
   * @param bb the BasicBlock we are considering
   */
  void CombineBlocks(BasicBlock* bb);

  void ClearAllVisitedFlags();

  void AllocateSSAUseData(MIR *mir, int num_uses);
  void AllocateSSADefData(MIR *mir, int num_defs);
  void CalculateBasicBlockInformation();
  void InitializeBasicBlockData();
  void ComputeDFSOrders();
  void ComputeDefBlockMatrix();
  void ComputeDominators();
  void CompilerInitializeSSAConversion();
  void InsertPhiNodes();
  void DoDFSPreOrderSSARename(BasicBlock* block);

  /*
   * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
   * we can verify that all catch entries have native PC entries.
   */
  std::set<uint32_t> catches_;

  // TODO: make these private.
  RegLocation* reg_location_;                         // Map SSA names to location.
  SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.

  static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
  static const uint32_t analysis_attributes_[kMirOpLast];

  void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
  bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);

  // Used for removing redundant suspend tests.
  void AppendGenSuspendTestList(BasicBlock* bb) {
    if (gen_suspend_test_list_.Size() == 0 ||
        gen_suspend_test_list_.Get(gen_suspend_test_list_.Size() - 1) != bb) {
      gen_suspend_test_list_.Insert(bb);
    }
  }

  /* This is used to check if there is already a method call dominating the
   * source basic block of a backedge and being dominated by the target basic
   * block of the backedge.
   */
  bool HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id);

 protected:
  int FindCommonParent(int block1, int block2);
  void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                         const ArenaBitVector* src2);
  void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                       ArenaBitVector* live_in_v, int dalvik_reg_id);
  void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
  bool DoSSAConversion(BasicBlock* bb);
  bool InvokeUsesMethodStar(MIR* mir);
  int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
  bool ContentIsInsn(const uint16_t* code_ptr);
  BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
                         BasicBlock** immed_pred_block_p);
  BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
                        BasicBlock** immed_pred_block_p);
  void ProcessTryCatchBlocks();
  BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags, const uint16_t* code_ptr, const uint16_t* code_end);
  BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags);
  BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                              int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
                              const uint16_t* code_end);
  int AddNewSReg(int v_reg);
  void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
  void DataFlowSSAFormat35C(MIR* mir);
  void DataFlowSSAFormat3RC(MIR* mir);
  bool FindLocalLiveIn(BasicBlock* bb);
  bool VerifyPredInfo(BasicBlock* bb);
  BasicBlock* NeedsVisit(BasicBlock* bb);
  BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
  void MarkPreOrder(BasicBlock* bb);
  void RecordDFSOrders(BasicBlock* bb);
  void ComputeDomPostOrderTraversal(BasicBlock* bb);
  void SetConstant(int32_t ssa_reg, int value);
  void SetConstantWide(int ssa_reg, int64_t value);
  int GetSSAUseCount(int s_reg);
  bool BasicBlockOpt(BasicBlock* bb);
  bool BuildExtendedBBList(struct BasicBlock* bb);
  bool FillDefBlockMatrix(BasicBlock* bb);
  void InitializeDominationInfo(BasicBlock* bb);
  bool ComputeblockIDom(BasicBlock* bb);
  bool ComputeBlockDominators(BasicBlock* bb);
  bool SetDominators(BasicBlock* bb);
  bool ComputeBlockLiveIns(BasicBlock* bb);
  bool ComputeDominanceFrontier(BasicBlock* bb);

  void CountChecks(BasicBlock* bb);
  void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats);
  bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default,
                              std::string* skip_message);

  CompilationUnit* const cu_;
  GrowableArray<int>* ssa_base_vregs_;
  GrowableArray<int>* ssa_subscripts_;
  // Map original Dalvik virtual reg i to the current SSA name.
  int* vreg_to_ssa_map_;            // length == method->registers_size
  int* ssa_last_defs_;              // length == method->registers_size
  ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
  int* constant_values_;            // length == num_ssa_reg
  // Use counts of ssa names.
  GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
  GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
  unsigned int num_reachable_blocks_;
  unsigned int max_num_reachable_blocks_;
  GrowableArray<BasicBlockId>* dfs_order_;
  GrowableArray<BasicBlockId>* dfs_post_order_;
  GrowableArray<BasicBlockId>* dom_post_order_traversal_;
  GrowableArray<BasicBlockId>* topological_order_;
  int* i_dom_list_;
  ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
  std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
  uint16_t* temp_insn_data_;
  uint32_t temp_bit_vector_size_;
  ArenaBitVector* temp_bit_vector_;
  static const int kInvalidEntry = -1;
  GrowableArray<BasicBlock*> block_list_;
  ArenaBitVector* try_block_addr_;
  BasicBlock* entry_block_;
  BasicBlock* exit_block_;
  unsigned int num_blocks_;
  const DexFile::CodeItem* current_code_item_;
  GrowableArray<uint16_t> dex_pc_to_block_map_;  // FindBlock lookup cache.
  std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
  typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
  std::vector<MIRLocation> method_stack_;        // Include stack
  int current_method_;
  DexOffset current_offset_;                     // Offset in code units
  int def_count_;                                // Used to estimate size of ssa name storage.
  int* opcode_count_;                            // Dex opcode coverage stats.
  int num_ssa_regs_;                             // Number of names following SSA transformation.
  std::vector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
  int method_sreg_;
  unsigned int attributes_;
  Checkstats* checkstats_;
  ArenaAllocator* arena_;
  int backward_branches_;
  int forward_branches_;
  GrowableArray<CompilerTemp*> compiler_temps_;
  size_t num_non_special_compiler_temps_;
  size_t max_available_non_special_compiler_temps_;
  size_t max_available_special_compiler_temps_;
  bool punt_to_interpreter_;                    // Difficult or not worthwhile - just interpret.
  uint64_t merged_df_flags_;
  GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
  GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
  GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
  static const uint64_t oat_data_flow_attributes_[kMirOpLast];
  GrowableArray<BasicBlock*> gen_suspend_test_list_;  // List of blocks containing suspend tests

  friend class ClassInitCheckEliminationTest;
  friend class LocalValueNumberingTest;
};

}  // namespace art

#endif  // ART_COMPILER_DEX_MIR_GRAPH_H_