mir_graph.h revision 7940e44f4517de5e2634a7e07d58d0fb26160513
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_COMPILER_DEX_MIRGRAPH_H_
#define ART_SRC_COMPILER_DEX_MIRGRAPH_H_

#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
#include "arena_bit_vector.h"
#include "growable_array.h"

namespace art {

enum DataFlowAttributePos {
  kUA = 0,
  kUB,
  kUC,
  kAWide,
  kBWide,
  kCWide,
  kDA,
  kIsMove,
  kSetsConst,
  kFormat35c,
  kFormat3rc,
  kNullCheckSrc0,        // Null check of uses[0].
  kNullCheckSrc1,        // Null check of uses[1].
  kNullCheckSrc2,        // Null check of uses[2].
  kNullCheckOut0,        // Null check of outgoing arg0.
  kDstNonNull,           // May assume dst is non-null.
  kRetNonNull,           // May assume retval is non-null.
  kNullTransferSrc0,     // Object copy src[0] -> dst.
  kNullTransferSrcN,     // Phi null check state transfer.
  kRangeCheckSrc1,       // Range check of uses[1].
  kRangeCheckSrc2,       // Range check of uses[2].
  kRangeCheckSrc3,       // Range check of uses[3].
  kFPA,
  kFPB,
  kFPC,
  kCoreA,
  kCoreB,
  kCoreC,
  kRefA,
  kRefB,
  kRefC,
  kUsesMethodStar,       // Implicit use of Method*.
};

#define DF_NOP                  0
#define DF_UA                   (1 << kUA)
#define DF_UB                   (1 << kUB)
#define DF_UC                   (1 << kUC)
#define DF_A_WIDE               (1 << kAWide)
#define DF_B_WIDE               (1 << kBWide)
#define DF_C_WIDE               (1 << kCWide)
#define DF_DA                   (1 << kDA)
#define DF_IS_MOVE              (1 << kIsMove)
#define DF_SETS_CONST           (1 << kSetsConst)
#define DF_FORMAT_35C           (1 << kFormat35c)
#define DF_FORMAT_3RC           (1 << kFormat3rc)
#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
#define DF_NON_NULL_DST         (1 << kDstNonNull)
#define DF_NON_NULL_RET         (1 << kRetNonNull)
#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
#define DF_FP_A                 (1 << kFPA)
#define DF_FP_B                 (1 << kFPB)
#define DF_FP_C                 (1 << kFPC)
#define DF_CORE_A               (1 << kCoreA)
#define DF_CORE_B               (1 << kCoreB)
#define DF_CORE_C               (1 << kCoreC)
#define DF_REF_A                (1 << kRefA)
#define DF_REF_B                (1 << kRefB)
#define DF_REF_C                (1 << kRefC)
#define DF_UMS                  (1 << kUsesMethodStar)

#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)

#define DF_HAS_DEFS             (DF_DA)

#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
                                 DF_NULL_CHK_1 | \
                                 DF_NULL_CHK_2 | \
                                 DF_NULL_CHK_OUT0)

#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
                                 DF_RANGE_CHK_2 | \
                                 DF_RANGE_CHK_3)

#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
                                 DF_HAS_RANGE_CHKS)

#define DF_A_IS_REG             (DF_UA | DF_DA)
#define DF_B_IS_REG             (DF_UB)
#define DF_C_IS_REG             (DF_UC)
#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)

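// Added illustration (not in the original header): entries in the per-opcode table
// oat_data_flow_attributes_ (declared in MIRGraph below) are formed by OR-ing these bits.
// A three-operand core ALU instruction such as ADD_INT would typically carry
//   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C
// meaning it defines vA, uses vB and vC, and treats all three operands as core
// (non-floating-point) values; the authoritative per-opcode encodings live in the
// table definition in the corresponding .cc file.
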
enum OatMethodAttributes {
  kIsLeaf,            // Method is leaf.
  kHasLoop,           // Method contains simple loop.
};

#define METHOD_IS_LEAF          (1 << kIsLeaf)
#define METHOD_HAS_LOOP         (1 << kHasLoop)

// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16

#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
#define INVALID_REG (0xFF)
#define INVALID_OFFSET (0xDEADF00FU)

/* SSA encodings for special registers */
#define SSA_METHOD_BASEREG (-2)
/* First compiler temp basereg, grows smaller */
#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
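// For example, with these encodings the Method* pseudo-register uses SSA base register -2 and
// successive compiler temps get base registers -3, -4, and so on (an added illustration; the
// values follow directly from the macros above).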

#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
#define MIR_INLINED                     (1 << kMIRInlined)
#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
#define MIR_CALLEE                      (1 << kMIRCallee)
#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
#define MIR_DUP                         (1 << kMIRDup)

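// Added note (not in the original header): these MIR_* bits are OR-ed into
// MIR::optimization_flags.  For instance, the null check elimination pass
// (see EliminateNullChecks() below) is expected to set MIR_IGNORE_NULL_CHECK on a MIR
// whose object operand is already known to be non-null, so the code generator can skip
// the explicit check.
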
#define BLOCK_NAME_LEN 80

/*
 * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
 * it is useful to have compiler-generated temporary registers and have them treated
 * in the same manner as dx-generated virtual registers.  This struct records the SSA
 * name of compiler-introduced temporaries.
 */
struct CompilerTemp {
  int s_reg;
};

// When the debug option is enabled, records the effectiveness of null and range check elimination.
struct Checkstats {
  int null_checks;
  int null_checks_eliminated;
  int range_checks;
  int range_checks_eliminated;
};

// Dataflow attributes of a basic block.
struct BasicBlockDataFlow {
  ArenaBitVector* use_v;
  ArenaBitVector* def_v;
  ArenaBitVector* live_in_v;
  ArenaBitVector* phi_v;
  int* vreg_to_ssa_map;
  ArenaBitVector* ending_null_check_v;
};

/*
 * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
 * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
 * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses (v2/v3, v4/v5).
 * Following SSA renaming, this is the primary struct used by code generators to locate
 * operand and result registers.  This is a somewhat confusing and unhelpful convention that
 * we may want to revisit in the future.
 */
struct SSARepresentation {
  int num_uses;
  int* uses;
  bool* fp_use;
  int num_defs;
  int* defs;
  bool* fp_def;
};
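
// Illustrative sketch (added; not in the original header): for the wide add mentioned
// above, "ADD_LONG v0, v2, v4", DoSSAConversion() would produce an SSARepresentation
// along the lines of
//   num_defs == 2, defs holding the SSA names of v0/v1 (the result pair), and
//   num_uses == 4, uses holding the SSA names of v2/v3 and v4/v5 (the operand pairs),
// with fp_use/fp_def all false, since long values are core rather than floating-point.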

/*
 * The MIR (mid-level intermediate representation) node, which may be largely considered
 * a wrapper around a Dalvik bytecode instruction.
 */
struct MIR {
  DecodedInstruction dalvikInsn;
  unsigned int width;
  unsigned int offset;
  int m_unit_index;               // Index of the method (m_unit) from which this MIR was included.
  MIR* prev;
  MIR* next;
  SSARepresentation* ssa_rep;
  int optimization_flags;
  union {
    // Establish link between two halves of throwing instructions.
    MIR* throw_insn;
    // Saved opcode for NOP'd MIRs.
    Instruction::Code original_opcode;
  } meta;
};

struct SuccessorBlockInfo;

struct BasicBlock {
  int id;
  int dfs_id;
  bool visited;
  bool hidden;
  bool catch_entry;
  bool explicit_throw;
  bool conditional_branch;
  bool terminated_by_return;        // Block ends with a Dalvik return opcode.
  bool dominates_return;            // Is a member of a return extended basic block.
  uint16_t start_offset;
  uint16_t nesting_depth;
  BBType block_type;
  MIR* first_mir_insn;
  MIR* last_mir_insn;
  BasicBlock* fall_through;
  BasicBlock* taken;
  BasicBlock* i_dom;                // Immediate dominator.
  BasicBlockDataFlow* data_flow_info;
  GrowableArray<BasicBlock*>* predecessors;
  ArenaBitVector* dominators;
  ArenaBitVector* i_dominated;      // Set of nodes being immediately dominated.
  ArenaBitVector* dom_frontier;     // Dominance frontier.
  struct {                          // For one-to-many successors like
    BlockListType block_list_type;  // switch and exception handling.
    GrowableArray<SuccessorBlockInfo*>* blocks;
  } successor_block_list;
};

/*
 * The "blocks" field in "successor_block_list" points to an array of elements with the type
 * "SuccessorBlockInfo".  For catch blocks, key is the type index of the exception.  For switch
 * blocks, key is the case value.
 */
// TODO: make class with placement new.
struct SuccessorBlockInfo {
  BasicBlock* block;
  int key;
};
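
// Illustrative sketch (added; not in the original header): a packed-switch over cases
// 0..2 would set block_list_type to the switch flavor (kPackedSwitch, assuming the usual
// BlockListType values) and hold three SuccessorBlockInfo entries with keys 0, 1 and 2,
// each pointing at the corresponding case block; a catch successor instead carries the
// exception's type index as its key.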

/*
 * Whereas an SSA name describes a definition of a Dalvik vreg, the RegLocation describes
 * the type of an SSA name (and can also be used by code generators to record where the
 * value is located, i.e. physical register, frame, spill, etc.).  For each SSA name (SReg)
 * there is a RegLocation.
 * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
 * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
 */
struct RegLocation {
  RegLocationType location:3;
  unsigned wide:1;
  unsigned defined:1;   // Do we know the type?
  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
  unsigned fp:1;        // Floating point?
  unsigned core:1;      // Non-floating point?
  unsigned ref:1;       // Something GC cares about.
  unsigned high_word:1; // High word of pair?
  unsigned home:1;      // Does this represent the home location?
  uint8_t low_reg;      // First physical register.
  uint8_t high_reg;     // 2nd physical register (if wide).
  int32_t s_reg_low;    // SSA name for low Dalvik word.
  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
                        // and consolidate usage w/ s_reg_low.
};
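
// Illustrative sketch (added; not in the original header): an SSA name holding a wide
// core value that has been promoted to a register pair might look roughly like
//   { kLocPhysReg, wide=1, defined=1, is_const=0, fp=0, core=1, ref=0, high_word=0,
//     home=1, low_reg/high_reg = the physical pair, s_reg_low = SSA name of the low word },
// while an unpromoted value stays in the Dalvik frame (kLocDalvikFrame), as in the
// bad_loc sentinel defined below.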

/*
 * Collection of information describing an invoke, and the destination of
 * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
 * more efficient invoke code generation.
 */
struct CallInfo {
  int num_arg_words;    // Note: word count, not arg count.
  RegLocation* args;    // One for each word of arguments.
  RegLocation result;   // Eventual target of MOVE_RESULT.
  int opt_flags;
  InvokeType type;
  uint32_t dex_idx;
  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
  uintptr_t direct_code;
  uintptr_t direct_method;
  RegLocation target;    // Target of following move_result.
  bool skip_this;
  bool is_range;
  int offset;            // Dalvik offset.
};
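
// Illustrative sketch (added; not in the original header): for a call like
// "invoke-virtual {v1, v2, v3}, LFoo;.bar(J)V" (hypothetical method) where v2/v3 hold a
// single long argument, num_arg_words is 3 (the receiver word plus two words for the long),
// args[] has one RegLocation per word, is_range is false for the 35c form, and result/target
// describe the destination of any following MOVE_RESULT.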


const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
                             INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG};

class MIRGraph {
 public:
  MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
  ~MIRGraph();

  /*
   * Parse dex method and add MIR at current insert point.  Returns id (which is
   * actually the index of the method in the m_units_ array).
   */
  void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                    InvokeType invoke_type, uint32_t class_def_idx,
                    uint32_t method_idx, jobject class_loader, const DexFile& dex_file);

  /* Find existing block */
  BasicBlock* FindBlock(unsigned int code_offset) {
    return FindBlock(code_offset, false, false, NULL);
  }

  const uint16_t* GetCurrentInsns() const {
    return current_code_item_->insns_;
  }

  const uint16_t* GetInsns(int m_unit_index) const {
    return m_units_[m_unit_index]->GetCodeItem()->insns_;
  }

  int GetNumBlocks() const {
    return num_blocks_;
  }

  ArenaBitVector* GetTryBlockAddr() const {
    return try_block_addr_;
  }

  BasicBlock* GetEntryBlock() const {
    return entry_block_;
  }

  BasicBlock* GetExitBlock() const {
    return exit_block_;
  }

  BasicBlock* GetBasicBlock(int block_id) const {
    return block_list_.Get(block_id);
  }

  size_t GetBasicBlockListCount() const {
    return block_list_.Size();
  }

  GrowableArray<BasicBlock*>* GetBlockList() {
    return &block_list_;
  }

  GrowableArray<int>* GetDfsOrder() {
    return dfs_order_;
  }

  GrowableArray<int>* GetDfsPostOrder() {
    return dfs_post_order_;
  }

  GrowableArray<int>* GetDomPostOrder() {
    return dom_post_order_traversal_;
  }

  int GetDefCount() const {
    return def_count_;
  }

  ArenaAllocator* GetArena() {
    return arena_;
  }

  void EnableOpcodeCounting() {
    opcode_count_ = static_cast<int*>(arena_->NewMem(kNumPackedOpcodes * sizeof(int), true,
                                      ArenaAllocator::kAllocMisc));
  }

  void ShowOpcodeStats();

  DexCompilationUnit* GetCurrentDexCompilationUnit() const {
    return m_units_[current_method_];
  }

  void DumpCFG(const char* dir_prefix, bool all_blocks);

  void BuildRegLocations();

  void DumpRegLocTable(RegLocation* table, int count);

  void BasicBlockOptimization();

  bool IsConst(int32_t s_reg) const {
    return is_constant_v_->IsBitSet(s_reg);
  }

  bool IsConst(RegLocation loc) const {
    return (IsConst(loc.orig_sreg));
  }

  int32_t ConstantValue(RegLocation loc) const {
    DCHECK(IsConst(loc));
    return constant_values_[loc.orig_sreg];
  }

  int32_t ConstantValue(int32_t s_reg) const {
    DCHECK(IsConst(s_reg));
    return constant_values_[s_reg];
  }

  int64_t ConstantValueWide(RegLocation loc) const {
    DCHECK(IsConst(loc));
    return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
        Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
  }
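
  // Worked example (added note, not in the original header): ConstantValueWide() above
  // rebuilds a 64-bit constant from the two 32-bit halves recorded by SetConstantWide().
  // If constant_values_[s_reg] == 0x9ABCDEF0 (low word) and
  // constant_values_[s_reg + 1] == 0x12345678 (high word), the result is
  // (0x12345678LL << 32) | 0x9ABCDEF0 == 0x123456789ABCDEF0.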

  bool IsConstantNullRef(RegLocation loc) const {
    return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
  }

  int GetNumSSARegs() const {
    return num_ssa_regs_;
  }

  void SetNumSSARegs(int new_num) {
    num_ssa_regs_ = new_num;
  }

  unsigned int GetNumReachableBlocks() const {
    return num_reachable_blocks_;
  }

  int GetUseCount(int vreg) const {
    return use_counts_.Get(vreg);
  }

  int GetRawUseCount(int vreg) const {
    return raw_use_counts_.Get(vreg);
  }

  int GetSSASubscript(int ssa_reg) const {
    return ssa_subscripts_->Get(ssa_reg);
  }

  RegLocation GetRawSrc(MIR* mir, int num) {
    DCHECK(num < mir->ssa_rep->num_uses);
    RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
    return res;
  }

  RegLocation GetRawDest(MIR* mir) {
    DCHECK_GT(mir->ssa_rep->num_defs, 0);
    RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
    return res;
  }

  RegLocation GetDest(MIR* mir) {
    RegLocation res = GetRawDest(mir);
    DCHECK(!res.wide);
    return res;
  }

  RegLocation GetSrc(MIR* mir, int num) {
    RegLocation res = GetRawSrc(mir, num);
    DCHECK(!res.wide);
    return res;
  }

  RegLocation GetDestWide(MIR* mir) {
    RegLocation res = GetRawDest(mir);
    DCHECK(res.wide);
    return res;
  }

  RegLocation GetSrcWide(MIR* mir, int low) {
    RegLocation res = GetRawSrc(mir, low);
    DCHECK(res.wide);
    return res;
  }

  RegLocation GetBadLoc() {
    return bad_loc;
  }

  int GetMethodSReg() {
    return method_sreg_;
  }

  bool MethodIsLeaf() {
    return attributes_ & METHOD_IS_LEAF;
  }

  RegLocation GetRegLocation(int index) {
    DCHECK((index >= 0) && (index < num_ssa_regs_));
    return reg_location_[index];
  }

  RegLocation GetMethodLoc() {
    return reg_location_[method_sreg_];
  }

  void BasicBlockCombine();
  void CodeLayout();
  void DumpCheckStats();
  void PropagateConstants();
  MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
  int SRegToVReg(int ssa_reg) const;
  void VerifyDataflow();
  void MethodUseCount();
  void SSATransformation();
  void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
  void NullCheckElimination();
  bool SetFp(int index, bool is_fp);
  bool SetCore(int index, bool is_core);
  bool SetRef(int index, bool is_ref);
  bool SetWide(int index, bool is_wide);
  bool SetHigh(int index, bool is_high);
  void AppendMIR(BasicBlock* bb, MIR* mir);
  void PrependMIR(BasicBlock* bb, MIR* mir);
  void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
  char* GetDalvikDisassembly(const MIR* mir);
  void ReplaceSpecialChars(std::string& str);
  std::string GetSSAName(int ssa_reg);
  std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
  void GetBlockName(BasicBlock* bb, char* name);
  const char* GetShortyFromTargetIdx(int);
  void DumpMIRGraph();
  CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
  BasicBlock* NewMemBB(BBType block_type, int block_id);

  /*
   * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
   * we can verify that all catch entries have native PC entries.
   */
  std::set<uint32_t> catches_;

  // TODO: make these private.
  RegLocation* reg_location_;                         // Map SSA names to location.
  GrowableArray<CompilerTemp*> compiler_temps_;
  SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.

  static const int oat_data_flow_attributes_[kMirOpLast];
  static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];

 private:

  int FindCommonParent(int block1, int block2);
  void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                         const ArenaBitVector* src2);
  void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                       ArenaBitVector* live_in_v, int dalvik_reg_id);
  void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
  void CompilerInitializeSSAConversion();
  bool DoSSAConversion(BasicBlock* bb);
  bool InvokeUsesMethodStar(MIR* mir);
  int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction);
  bool ContentIsInsn(const uint16_t* code_ptr);
  BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block,
                         BasicBlock** immed_pred_block_p);
  BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create,
                        BasicBlock** immed_pred_block_p);
  void ProcessTryCatchBlocks();
  BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                               int flags, const uint16_t* code_ptr, const uint16_t* code_end);
  void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags);
  BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
                              int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
                              const uint16_t* code_end);
  int AddNewSReg(int v_reg);
  void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
  void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
  void DataFlowSSAFormat35C(MIR* mir);
  void DataFlowSSAFormat3RC(MIR* mir);
  bool FindLocalLiveIn(BasicBlock* bb);
  void ClearAllVisitedFlags();
  bool CountUses(struct BasicBlock* bb);
  bool InferTypeAndSize(BasicBlock* bb);
  bool VerifyPredInfo(BasicBlock* bb);
  BasicBlock* NeedsVisit(BasicBlock* bb);
  BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
  void MarkPreOrder(BasicBlock* bb);
  void RecordDFSOrders(BasicBlock* bb);
  void ComputeDFSOrders();
  void ComputeDefBlockMatrix();
  void ComputeDomPostOrderTraversal(BasicBlock* bb);
  void ComputeDominators();
  void InsertPhiNodes();
  void DoDFSPreOrderSSARename(BasicBlock* block);
  void SetConstant(int32_t ssa_reg, int value);
  void SetConstantWide(int ssa_reg, int64_t value);
  int GetSSAUseCount(int s_reg);
  bool BasicBlockOpt(BasicBlock* bb);
  bool EliminateNullChecks(BasicBlock* bb);
  void NullCheckEliminationInit(BasicBlock* bb);
  bool BuildExtendedBBList(struct BasicBlock* bb);
  bool FillDefBlockMatrix(BasicBlock* bb);
  void InitializeDominationInfo(BasicBlock* bb);
  bool ComputeblockIDom(BasicBlock* bb);
  bool ComputeBlockDominators(BasicBlock* bb);
  bool SetDominators(BasicBlock* bb);
  bool ComputeBlockLiveIns(BasicBlock* bb);
  bool InsertPhiNodeOperands(BasicBlock* bb);
  bool ComputeDominanceFrontier(BasicBlock* bb);
  void DoConstantPropogation(BasicBlock* bb);
  void CountChecks(BasicBlock* bb);
  bool CombineBlocks(BasicBlock* bb);

  CompilationUnit* const cu_;
  GrowableArray<int>* ssa_base_vregs_;
  GrowableArray<int>* ssa_subscripts_;
  // Map original Dalvik virtual reg i to the current SSA name.
  int* vreg_to_ssa_map_;            // length == method->registers_size
  int* ssa_last_defs_;              // length == method->registers_size
  ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
  int* constant_values_;            // length == num_ssa_reg
  // Use counts of ssa names.
  GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
  GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
  unsigned int num_reachable_blocks_;
  GrowableArray<int>* dfs_order_;
  GrowableArray<int>* dfs_post_order_;
  GrowableArray<int>* dom_post_order_traversal_;
  int* i_dom_list_;
  ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
  ArenaBitVector* temp_block_v_;
  ArenaBitVector* temp_dalvik_register_v_;
  ArenaBitVector* temp_ssa_register_v_;  // num_ssa_regs.
  static const int kInvalidEntry = -1;
  GrowableArray<BasicBlock*> block_list_;
  ArenaBitVector* try_block_addr_;
  BasicBlock* entry_block_;
  BasicBlock* exit_block_;
  BasicBlock* cur_block_;
  int num_blocks_;
  const DexFile::CodeItem* current_code_item_;
  SafeMap<unsigned int, BasicBlock*> block_map_;    // FindBlock lookup cache.
  std::vector<DexCompilationUnit*> m_units_;        // List of methods included in this graph.
  typedef std::pair<int, int> MIRLocation;          // Insert point: (m_unit_ index, offset).
  std::vector<MIRLocation> method_stack_;           // Include stack.
  int current_method_;
  int current_offset_;
  int def_count_;                                   // Used to estimate size of ssa name storage.
  int* opcode_count_;                               // Dex opcode coverage stats.
  int num_ssa_regs_;                                // Number of names following SSA transformation.
  std::vector<BasicBlock*> extended_basic_blocks_;  // Heads of block "traces".
  int method_sreg_;
  unsigned int attributes_;
  Checkstats* checkstats_;
  ArenaAllocator* arena_;
};

}  // namespace art

#endif  // ART_SRC_COMPILER_DEX_MIRGRAPH_H_