// mir_graph.h revision ee17e0aa4d24deb11c1766bfcc6a864519df1c1e
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_MIR_GRAPH_H_
#define ART_COMPILER_DEX_MIR_GRAPH_H_

#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
#include "arena_bit_vector.h"
#include "growable_array.h"

namespace art {

/*
 * Bit positions used to build the per-opcode analysis attribute masks (the AN_*
 * macros below), consumed via MIRGraph::analysis_attributes_ when deciding
 * whether a method is worth compiling (see MIRGraph::SkipCompilation).
 */
enum InstructionAnalysisAttributePos {
  kUninterestingOp = 0,
  kArithmeticOp,
  kFPOp,
  kSingleOp,
  kDoubleOp,
  kIntOp,
  kLongOp,
  kBranchOp,
  kInvokeOp,
  kArrayOp,
  kHeavyweightOp,
  kSimpleConstOp,
  kMoveOp
};

#define AN_NONE (1 << kUninterestingOp)
#define AN_MATH (1 << kArithmeticOp)
#define AN_FP (1 << kFPOp)
#define AN_LONG (1 << kLongOp)
#define AN_INT (1 << kIntOp)
#define AN_SINGLE (1 << kSingleOp)
#define AN_DOUBLE (1 << kDoubleOp)
// NOTE(review): AN_FLOATMATH expands identically to AN_FP (both 1 << kFPOp).
#define AN_FLOATMATH (1 << kFPOp)
#define AN_BRANCH (1 << kBranchOp)
#define AN_INVOKE (1 << kInvokeOp)
#define AN_ARRAYOP (1 << kArrayOp)
#define AN_HEAVYWEIGHT (1 << kHeavyweightOp)
#define AN_SIMPLECONST (1 << kSimpleConstOp)
#define AN_MOVE (1 << kMoveOp)
// "Computational" ops: cheap straight-line work (math, array ops, moves, consts).
#define AN_COMPUTATIONAL (AN_MATH | AN_ARRAYOP | AN_MOVE | AN_SIMPLECONST)

/*
 * Bit positions for the per-instruction dataflow attribute masks (the DF_*
 * macros below, stored per opcode in MIRGraph::oat_data_flow_attributes_).
 */
enum DataFlowAttributePos {
  kUA = 0,
  kUB,
  kUC,
  kAWide,
  kBWide,
  kCWide,
  kDA,
  kIsMove,
  kSetsConst,
  kFormat35c,
  kFormat3rc,
  kNullCheckSrc0,     // Null check of uses[0].
  kNullCheckSrc1,     // Null check of uses[1].
  kNullCheckSrc2,     // Null check of uses[2].
  kNullCheckOut0,     // Null check of outgoing arg0.
  kDstNonNull,        // May assume dst is non-null.
  kRetNonNull,        // May assume retval is non-null.
  kNullTransferSrc0,  // Object copy src[0] -> dst.
  kNullTransferSrcN,  // Phi null check state transfer.
  kRangeCheckSrc1,    // Range check of uses[1].
  kRangeCheckSrc2,    // Range check of uses[2].
  kRangeCheckSrc3,    // Range check of uses[3].
  kFPA,
  kFPB,
  kFPC,
  kCoreA,
  kCoreB,
  kCoreC,
  kRefA,
  kRefB,
  kRefC,
  kUsesMethodStar,    // Implicit use of Method*.
};

#define DF_NOP 0
#define DF_UA (1 << kUA)
#define DF_UB (1 << kUB)
#define DF_UC (1 << kUC)
#define DF_A_WIDE (1 << kAWide)
#define DF_B_WIDE (1 << kBWide)
#define DF_C_WIDE (1 << kCWide)
#define DF_DA (1 << kDA)
#define DF_IS_MOVE (1 << kIsMove)
#define DF_SETS_CONST (1 << kSetsConst)
#define DF_FORMAT_35C (1 << kFormat35c)
#define DF_FORMAT_3RC (1 << kFormat3rc)
#define DF_NULL_CHK_0 (1 << kNullCheckSrc0)
#define DF_NULL_CHK_1 (1 << kNullCheckSrc1)
#define DF_NULL_CHK_2 (1 << kNullCheckSrc2)
#define DF_NULL_CHK_OUT0 (1 << kNullCheckOut0)
#define DF_NON_NULL_DST (1 << kDstNonNull)
#define DF_NON_NULL_RET (1 << kRetNonNull)
#define DF_NULL_TRANSFER_0 (1 << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N (1 << kNullTransferSrcN)
#define DF_RANGE_CHK_1 (1 << kRangeCheckSrc1)
#define DF_RANGE_CHK_2 (1 << kRangeCheckSrc2)
#define DF_RANGE_CHK_3 (1 << kRangeCheckSrc3)
#define DF_FP_A (1 << kFPA)
#define DF_FP_B (1 << kFPB)
#define DF_FP_C (1 << kFPC)
#define DF_CORE_A (1 << kCoreA)
#define DF_CORE_B (1 << kCoreB)
#define DF_CORE_C (1 << kCoreC)
#define DF_REF_A (1 << kRefA)
#define DF_REF_B (1 << kRefB)
#define DF_REF_C (1 << kRefC)
// NOTE(review): kUsesMethodStar is bit 31, so this shifts into the sign bit of
// int — TODO consider unsigned (or 64-bit) masks if more attributes are added.
#define DF_UMS (1 << kUsesMethodStar)

#define DF_HAS_USES (DF_UA | DF_UB | DF_UC)

#define DF_HAS_DEFS (DF_DA)

#define DF_HAS_NULL_CHKS (DF_NULL_CHK_0 | \
                          DF_NULL_CHK_1 | \
                          DF_NULL_CHK_2 | \
                          DF_NULL_CHK_OUT0)

#define DF_HAS_RANGE_CHKS (DF_RANGE_CHK_1 | \
                           DF_RANGE_CHK_2 | \
                           DF_RANGE_CHK_3)

#define DF_HAS_NR_CHKS (DF_HAS_NULL_CHKS | \
                        DF_HAS_RANGE_CHKS)

#define DF_A_IS_REG (DF_UA | DF_DA)
#define DF_B_IS_REG (DF_UB)
#define DF_C_IS_REG (DF_UC)
// NOTE(review): DF_IS_GETTER / DF_IS_SETTER are not defined anywhere in this
// file; this macro would fail to compile if expanded — presumably a leftover
// from an earlier revision.  Verify before use.
#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)

// Per-method attribute bits (stored in MIRGraph::attributes_).
enum OatMethodAttributes {
  kIsLeaf,            // Method is leaf.
  kHasLoop,           // Method contains simple loop.
};

#define METHOD_IS_LEAF (1 << kIsLeaf)
#define METHOD_HAS_LOOP (1 << kHasLoop)

// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16

#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
#define INVALID_REG (0xFF)
#define INVALID_OFFSET (0xDEADF00FU)

/* SSA encodings for special registers */
#define SSA_METHOD_BASEREG (-2)
/* First compiler temp basereg, grows smaller */
#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)

// Per-MIR optimization flags (stored in MIR::optimization_flags; the kMIR*
// bit positions are declared elsewhere).
#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
#define MIR_NULL_CHECK_ONLY (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY (1 << kMIRRangeCheckOnly)
#define MIR_INLINED (1 << kMIRInlined)
#define MIR_INLINED_PRED (1 << kMIRInlinedPred)
#define MIR_CALLEE (1 << kMIRCallee)
#define MIR_IGNORE_SUSPEND_CHECK (1 << kMIRIgnoreSuspendCheck)
#define MIR_DUP (1 << kMIRDup)

#define BLOCK_NAME_LEN 80
/*
 * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
 * it is useful to have compiler-generated temporary registers and have them treated
 * in the same manner as dx-generated virtual registers.  This struct records the SSA
 * name of compiler-introduced temporaries.
 */
struct CompilerTemp {
  int s_reg;  // SSA name of the temporary.
};

// When debug option enabled, records effectiveness of null and range check elimination.
struct Checkstats {
  int null_checks;
  int null_checks_eliminated;
  int range_checks;
  int range_checks_eliminated;
};

// Dataflow attributes of a basic block.
struct BasicBlockDataFlow {
  ArenaBitVector* use_v;
  ArenaBitVector* def_v;
  ArenaBitVector* live_in_v;
  ArenaBitVector* phi_v;
  int* vreg_to_ssa_map;
  ArenaBitVector* ending_null_check_v;
};

/*
 * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
 * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
 * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses
 * (v2/v3, v4/v5).  Following SSA renaming, this is the primary struct used by code
 * generators to locate operand and result registers.  This is a somewhat confusing and
 * unhelpful convention that we may want to revisit in the future.
 */
struct SSARepresentation {
  int num_uses;
  int* uses;
  bool* fp_use;
  int num_defs;
  int* defs;
  bool* fp_def;
};

/*
 * The Midlevel Intermediate Representation node, which may be largely considered a
 * wrapper around a Dalvik byte code.
 */
struct MIR {
  DecodedInstruction dalvikInsn;
  unsigned int width;
  unsigned int offset;
  int m_unit_index;  // From which method was this MIR included.
  MIR* prev;
  MIR* next;
  SSARepresentation* ssa_rep;
  int optimization_flags;  // MIR_* flag bits.
  union {
    // Establish link between two halves of throwing instructions.
    MIR* throw_insn;
    // Saved opcode for NOP'd MIRs.
    Instruction::Code original_opcode;
  } meta;
};

struct SuccessorBlockInfo;

struct BasicBlock {
  int id;
  int dfs_id;
  bool visited;
  bool hidden;
  bool catch_entry;
  bool explicit_throw;
  bool conditional_branch;
  bool terminated_by_return;  // Block ends with a Dalvik return opcode.
  bool dominates_return;      // Is a member of return extended basic block.
  uint16_t start_offset;
  uint16_t nesting_depth;
  BBType block_type;
  MIR* first_mir_insn;
  MIR* last_mir_insn;
  BasicBlock* fall_through;
  BasicBlock* taken;
  BasicBlock* i_dom;          // Immediate dominator.
  BasicBlockDataFlow* data_flow_info;
  GrowableArray<BasicBlock*>* predecessors;
  ArenaBitVector* dominators;
  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
  ArenaBitVector* dom_frontier;     // Dominance frontier.
  struct {                          // For one-to-many successors like.
    BlockListType block_list_type;  // switch and exception handling.
    GrowableArray<SuccessorBlockInfo*>* blocks;
  } successor_block_list;
};

/*
 * The "blocks" field in "successor_block_list" points to an array of elements with the type
 * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
 * blocks, key is the case value.
 */
// TODO: make class with placement new.
struct SuccessorBlockInfo {
  BasicBlock* block;
  int key;
};
With 298 * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout. 299 */ 300struct RegLocation { 301 RegLocationType location:3; 302 unsigned wide:1; 303 unsigned defined:1; // Do we know the type? 304 unsigned is_const:1; // Constant, value in mir_graph->constant_values[]. 305 unsigned fp:1; // Floating point? 306 unsigned core:1; // Non-floating point? 307 unsigned ref:1; // Something GC cares about. 308 unsigned high_word:1; // High word of pair? 309 unsigned home:1; // Does this represent the home location? 310 uint8_t low_reg; // First physical register. 311 uint8_t high_reg; // 2nd physical register (if wide). 312 int32_t s_reg_low; // SSA name for low Dalvik word. 313 int32_t orig_sreg; // TODO: remove after Bitcode gen complete 314 // and consolodate usage w/ s_reg_low. 315}; 316 317/* 318 * Collection of information describing an invoke, and the destination of 319 * the subsequent MOVE_RESULT (if applicable). Collected as a unit to enable 320 * more efficient invoke code generation. 321 */ 322struct CallInfo { 323 int num_arg_words; // Note: word count, not arg count. 324 RegLocation* args; // One for each word of arguments. 325 RegLocation result; // Eventual target of MOVE_RESULT. 326 int opt_flags; 327 InvokeType type; 328 uint32_t dex_idx; 329 uint32_t index; // Method idx for invokes, type idx for FilledNewArray. 330 uintptr_t direct_code; 331 uintptr_t direct_method; 332 RegLocation target; // Target of following move_result. 333 bool skip_this; 334 bool is_range; 335 int offset; // Dalvik offset. 336}; 337 338 339const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, 340 INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG}; 341 342class MIRGraph { 343 public: 344 MIRGraph(CompilationUnit* cu, ArenaAllocator* arena); 345 ~MIRGraph(); 346 347 /* 348 * Examine the graph to determine whether it's worthwile to spend the time compiling 349 * this method. 
350 */ 351 bool SkipCompilation(Runtime::CompilerFilter compiler_filter); 352 353 /* 354 * Parse dex method and add MIR at current insert point. Returns id (which is 355 * actually the index of the method in the m_units_ array). 356 */ 357 void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags, 358 InvokeType invoke_type, uint32_t class_def_idx, 359 uint32_t method_idx, jobject class_loader, const DexFile& dex_file); 360 361 /* Find existing block */ 362 BasicBlock* FindBlock(unsigned int code_offset) { 363 return FindBlock(code_offset, false, false, NULL); 364 } 365 366 const uint16_t* GetCurrentInsns() const { 367 return current_code_item_->insns_; 368 } 369 370 const uint16_t* GetInsns(int m_unit_index) const { 371 return m_units_[m_unit_index]->GetCodeItem()->insns_; 372 } 373 374 int GetNumBlocks() const { 375 return num_blocks_; 376 } 377 378 size_t GetNumDalvikInsns() const { 379 return cu_->code_item->insns_size_in_code_units_; 380 } 381 382 ArenaBitVector* GetTryBlockAddr() const { 383 return try_block_addr_; 384 } 385 386 BasicBlock* GetEntryBlock() const { 387 return entry_block_; 388 } 389 390 BasicBlock* GetExitBlock() const { 391 return exit_block_; 392 } 393 394 BasicBlock* GetBasicBlock(int block_id) const { 395 return block_list_.Get(block_id); 396 } 397 398 size_t GetBasicBlockListCount() const { 399 return block_list_.Size(); 400 } 401 402 GrowableArray<BasicBlock*>* GetBlockList() { 403 return &block_list_; 404 } 405 406 GrowableArray<int>* GetDfsOrder() { 407 return dfs_order_; 408 } 409 410 GrowableArray<int>* GetDfsPostOrder() { 411 return dfs_post_order_; 412 } 413 414 GrowableArray<int>* GetDomPostOrder() { 415 return dom_post_order_traversal_; 416 } 417 418 int GetDefCount() const { 419 return def_count_; 420 } 421 422 ArenaAllocator* GetArena() { 423 return arena_; 424 } 425 426 void EnableOpcodeCounting() { 427 opcode_count_ = static_cast<int*>(arena_->NewMem(kNumPackedOpcodes * sizeof(int), true, 428 
ArenaAllocator::kAllocMisc)); 429 } 430 431 void ShowOpcodeStats(); 432 433 DexCompilationUnit* GetCurrentDexCompilationUnit() const { 434 return m_units_[current_method_]; 435 } 436 437 void DumpCFG(const char* dir_prefix, bool all_blocks); 438 439 void BuildRegLocations(); 440 441 void DumpRegLocTable(RegLocation* table, int count); 442 443 void BasicBlockOptimization(); 444 445 bool IsConst(int32_t s_reg) const { 446 return is_constant_v_->IsBitSet(s_reg); 447 } 448 449 bool IsConst(RegLocation loc) const { 450 return (IsConst(loc.orig_sreg)); 451 } 452 453 int32_t ConstantValue(RegLocation loc) const { 454 DCHECK(IsConst(loc)); 455 return constant_values_[loc.orig_sreg]; 456 } 457 458 int32_t ConstantValue(int32_t s_reg) const { 459 DCHECK(IsConst(s_reg)); 460 return constant_values_[s_reg]; 461 } 462 463 int64_t ConstantValueWide(RegLocation loc) const { 464 DCHECK(IsConst(loc)); 465 return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) | 466 Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg])); 467 } 468 469 bool IsConstantNullRef(RegLocation loc) const { 470 return loc.ref && loc.is_const && (ConstantValue(loc) == 0); 471 } 472 473 int GetNumSSARegs() const { 474 return num_ssa_regs_; 475 } 476 477 void SetNumSSARegs(int new_num) { 478 num_ssa_regs_ = new_num; 479 } 480 481 unsigned int GetNumReachableBlocks() const { 482 return num_reachable_blocks_; 483 } 484 485 int GetUseCount(int vreg) const { 486 return use_counts_.Get(vreg); 487 } 488 489 int GetRawUseCount(int vreg) const { 490 return raw_use_counts_.Get(vreg); 491 } 492 493 int GetSSASubscript(int ssa_reg) const { 494 return ssa_subscripts_->Get(ssa_reg); 495 } 496 497 RegLocation GetRawSrc(MIR* mir, int num) { 498 DCHECK(num < mir->ssa_rep->num_uses); 499 RegLocation res = reg_location_[mir->ssa_rep->uses[num]]; 500 return res; 501 } 502 503 RegLocation GetRawDest(MIR* mir) { 504 DCHECK_GT(mir->ssa_rep->num_defs, 0); 505 RegLocation res = 
reg_location_[mir->ssa_rep->defs[0]]; 506 return res; 507 } 508 509 RegLocation GetDest(MIR* mir) { 510 RegLocation res = GetRawDest(mir); 511 DCHECK(!res.wide); 512 return res; 513 } 514 515 RegLocation GetSrc(MIR* mir, int num) { 516 RegLocation res = GetRawSrc(mir, num); 517 DCHECK(!res.wide); 518 return res; 519 } 520 521 RegLocation GetDestWide(MIR* mir) { 522 RegLocation res = GetRawDest(mir); 523 DCHECK(res.wide); 524 return res; 525 } 526 527 RegLocation GetSrcWide(MIR* mir, int low) { 528 RegLocation res = GetRawSrc(mir, low); 529 DCHECK(res.wide); 530 return res; 531 } 532 533 RegLocation GetBadLoc() { 534 return bad_loc; 535 } 536 537 int GetMethodSReg() { 538 return method_sreg_; 539 } 540 541 bool MethodIsLeaf() { 542 return attributes_ & METHOD_IS_LEAF; 543 } 544 545 RegLocation GetRegLocation(int index) { 546 DCHECK((index >= 0) && (index > num_ssa_regs_)); 547 return reg_location_[index]; 548 } 549 550 RegLocation GetMethodLoc() { 551 return reg_location_[method_sreg_]; 552 } 553 554 bool IsSpecialCase() { 555 return special_case_ != kNoHandler; 556 } 557 558 SpecialCaseHandler GetSpecialCase() { 559 return special_case_; 560 } 561 562 void BasicBlockCombine(); 563 void CodeLayout(); 564 void DumpCheckStats(); 565 void PropagateConstants(); 566 MIR* FindMoveResult(BasicBlock* bb, MIR* mir); 567 int SRegToVReg(int ssa_reg) const; 568 void VerifyDataflow(); 569 void MethodUseCount(); 570 void SSATransformation(); 571 void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb); 572 void NullCheckElimination(); 573 bool SetFp(int index, bool is_fp); 574 bool SetCore(int index, bool is_core); 575 bool SetRef(int index, bool is_ref); 576 bool SetWide(int index, bool is_wide); 577 bool SetHigh(int index, bool is_high); 578 void AppendMIR(BasicBlock* bb, MIR* mir); 579 void PrependMIR(BasicBlock* bb, MIR* mir); 580 void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir); 581 char* GetDalvikDisassembly(const MIR* mir); 582 void 
ReplaceSpecialChars(std::string& str); 583 std::string GetSSAName(int ssa_reg); 584 std::string GetSSANameWithConst(int ssa_reg, bool singles_only); 585 void GetBlockName(BasicBlock* bb, char* name); 586 const char* GetShortyFromTargetIdx(int); 587 void DumpMIRGraph(); 588 CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range); 589 BasicBlock* NewMemBB(BBType block_type, int block_id); 590 591 /* 592 * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on 593 * we can verify that all catch entries have native PC entries. 594 */ 595 std::set<uint32_t> catches_; 596 597 // TODO: make these private. 598 RegLocation* reg_location_; // Map SSA names to location. 599 GrowableArray<CompilerTemp*> compiler_temps_; 600 SafeMap<unsigned int, unsigned int> block_id_map_; // Block collapse lookup cache. 601 602 static const int oat_data_flow_attributes_[kMirOpLast]; 603 static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst]; 604 static const uint32_t analysis_attributes_[kMirOpLast]; 605 606 private: 607 int FindCommonParent(int block1, int block2); 608 void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1, 609 const ArenaBitVector* src2); 610 void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v, 611 ArenaBitVector* live_in_v, int dalvik_reg_id); 612 void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id); 613 void CompilerInitializeSSAConversion(); 614 bool DoSSAConversion(BasicBlock* bb); 615 bool InvokeUsesMethodStar(MIR* mir); 616 int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction); 617 bool ContentIsInsn(const uint16_t* code_ptr); 618 BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block, 619 BasicBlock** immed_pred_block_p); 620 BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create, 621 BasicBlock** immed_pred_block_p); 622 void ProcessTryCatchBlocks(); 623 BasicBlock* ProcessCanBranch(BasicBlock* 
cur_block, MIR* insn, int cur_offset, int width, 624 int flags, const uint16_t* code_ptr, const uint16_t* code_end); 625 void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags); 626 BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, 627 int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr, 628 const uint16_t* code_end); 629 int AddNewSReg(int v_reg); 630 void HandleSSAUse(int* uses, int dalvik_reg, int reg_index); 631 void HandleSSADef(int* defs, int dalvik_reg, int reg_index); 632 void DataFlowSSAFormat35C(MIR* mir); 633 void DataFlowSSAFormat3RC(MIR* mir); 634 bool FindLocalLiveIn(BasicBlock* bb); 635 void ClearAllVisitedFlags(); 636 bool CountUses(struct BasicBlock* bb); 637 bool InferTypeAndSize(BasicBlock* bb); 638 bool VerifyPredInfo(BasicBlock* bb); 639 BasicBlock* NeedsVisit(BasicBlock* bb); 640 BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb); 641 void MarkPreOrder(BasicBlock* bb); 642 void RecordDFSOrders(BasicBlock* bb); 643 void ComputeDFSOrders(); 644 void ComputeDefBlockMatrix(); 645 void ComputeDomPostOrderTraversal(BasicBlock* bb); 646 void ComputeDominators(); 647 void InsertPhiNodes(); 648 void DoDFSPreOrderSSARename(BasicBlock* block); 649 void SetConstant(int32_t ssa_reg, int value); 650 void SetConstantWide(int ssa_reg, int64_t value); 651 int GetSSAUseCount(int s_reg); 652 bool BasicBlockOpt(BasicBlock* bb); 653 bool EliminateNullChecks(BasicBlock* bb); 654 void NullCheckEliminationInit(BasicBlock* bb); 655 bool BuildExtendedBBList(struct BasicBlock* bb); 656 bool FillDefBlockMatrix(BasicBlock* bb); 657 void InitializeDominationInfo(BasicBlock* bb); 658 bool ComputeblockIDom(BasicBlock* bb); 659 bool ComputeBlockDominators(BasicBlock* bb); 660 bool SetDominators(BasicBlock* bb); 661 bool ComputeBlockLiveIns(BasicBlock* bb); 662 bool InsertPhiNodeOperands(BasicBlock* bb); 663 bool ComputeDominanceFrontier(BasicBlock* bb); 664 void 
DoConstantPropogation(BasicBlock* bb); 665 void CountChecks(BasicBlock* bb); 666 bool CombineBlocks(BasicBlock* bb); 667 void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats); 668 bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default); 669 670 CompilationUnit* const cu_; 671 GrowableArray<int>* ssa_base_vregs_; 672 GrowableArray<int>* ssa_subscripts_; 673 // Map original Dalvik virtual reg i to the current SSA name. 674 int* vreg_to_ssa_map_; // length == method->registers_size 675 int* ssa_last_defs_; // length == method->registers_size 676 ArenaBitVector* is_constant_v_; // length == num_ssa_reg 677 int* constant_values_; // length == num_ssa_reg 678 // Use counts of ssa names. 679 GrowableArray<uint32_t> use_counts_; // Weighted by nesting depth 680 GrowableArray<uint32_t> raw_use_counts_; // Not weighted 681 unsigned int num_reachable_blocks_; 682 GrowableArray<int>* dfs_order_; 683 GrowableArray<int>* dfs_post_order_; 684 GrowableArray<int>* dom_post_order_traversal_; 685 int* i_dom_list_; 686 ArenaBitVector** def_block_matrix_; // num_dalvik_register x num_blocks. 687 ArenaBitVector* temp_block_v_; 688 ArenaBitVector* temp_dalvik_register_v_; 689 ArenaBitVector* temp_ssa_register_v_; // num_ssa_regs. 690 static const int kInvalidEntry = -1; 691 GrowableArray<BasicBlock*> block_list_; 692 ArenaBitVector* try_block_addr_; 693 BasicBlock* entry_block_; 694 BasicBlock* exit_block_; 695 BasicBlock* cur_block_; 696 int num_blocks_; 697 const DexFile::CodeItem* current_code_item_; 698 SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache. 699 std::vector<DexCompilationUnit*> m_units_; // List of methods included in this graph 700 typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset) 701 std::vector<MIRLocation> method_stack_; // Include stack 702 int current_method_; 703 int current_offset_; 704 int def_count_; // Used to estimate size of ssa name storage. 
705 int* opcode_count_; // Dex opcode coverage stats. 706 int num_ssa_regs_; // Number of names following SSA transformation. 707 std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces". 708 int method_sreg_; 709 unsigned int attributes_; 710 Checkstats* checkstats_; 711 SpecialCaseHandler special_case_; 712 ArenaAllocator* arena_; 713}; 714 715} // namespace art 716 717#endif // ART_COMPILER_DEX_MIR_GRAPH_H_ 718