mir_to_lir.h revision 2f244e9faccfcca68af3c5484c397a01a1c3a342
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "driver/compiler_driver.h"
#include "leb128.h"
#include "safe_map.h"
#include "utils/arena_allocator.h"
#include "utils/growable_array.h"

namespace art {

/*
 * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
 * add type safety (see runtime/offsets.h).
 */
typedef uint32_t DexOffset;          // Dex offset in code units.
typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
typedef uint32_t CodeOffset;         // Native code offset in bytes.

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
#define IS_BRANCH            (1ULL << kIsBranch)
#define IS_IT                (1ULL << kIsIT)
#define IS_LOAD              (1ULL << kMemLoad)
#define IS_QUAD_OP           (1ULL << kIsQuadOp)
#define IS_QUIN_OP           (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
#define IS_STORE             (1ULL << kMemStore)
#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
#define NO_OPERAND           (1ULL << kNoOperand)
#define REG_DEF0             (1ULL << kRegDef0)
#define REG_DEF1             (1ULL << kRegDef1)
#define REG_DEF2             (1ULL << kRegDef2)
#define REG_DEFA             (1ULL << kRegDefA)
#define REG_DEFD             (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0        (1ULL << kRegDefList0)
#define REG_DEF_LIST1        (1ULL << kRegDefList1)
#define REG_DEF_LR           (1ULL << kRegDefLR)
#define REG_DEF_SP           (1ULL << kRegDefSP)
#define REG_USE0             (1ULL << kRegUse0)
#define REG_USE1             (1ULL << kRegUse1)
#define REG_USE2             (1ULL << kRegUse2)
#define REG_USE3             (1ULL << kRegUse3)
#define REG_USE4             (1ULL << kRegUse4)
#define REG_USEA             (1ULL << kRegUseA)
#define REG_USEC             (1ULL << kRegUseC)
#define REG_USED             (1ULL << kRegUseD)
#define REG_USEB             (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0        (1ULL << kRegUseList0)
#define REG_USE_LIST1        (1ULL << kRegUseList1)
#define REG_USE_LR           (1ULL << kRegUseLR)
#define REG_USE_PC           (1ULL << kRegUsePC)
#define REG_USE_SP           (1ULL << kRegUseSP)
#define SETS_CCODES          (1ULL << kSetsCCodes)
#define USES_CCODES          (1ULL << kUsesCCodes)
#define USE_FP_STACK         (1ULL << kUseFpStack)
#define REG_USE_LO           (1ULL << kUseLo)
#define REG_USE_HI           (1ULL << kUseHi)
#define REG_DEF_LO           (1ULL << kDefLo)
#define REG_DEF_HI           (1ULL << kDefHi)
// Common combo register usage patterns.
#define REG_DEF01            (REG_DEF0 | REG_DEF1)
#define REG_DEF012           (REG_DEF0 | REG_DEF1 | REG_DEF2)
#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123      (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
#define REG_USE012           (REG_USE01 | REG_USE2)
#define REG_USE014           (REG_USE01 | REG_USE4)
#define REG_USE01            (REG_USE0 | REG_USE1)
#define REG_USE02            (REG_USE0 | REG_USE2)
#define REG_USE12            (REG_USE1 | REG_USE2)
#define REG_USE23            (REG_USE2 | REG_USE3)
#define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)

// TODO: #includes need a cleanup
#ifndef INVALID_SREG
#define INVALID_SREG (-1)
#endif

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
struct MIR;
struct LIR;
struct RegLocation;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef std::vector<uint8_t> CodeBuffer;

struct UseDefMasks {
  uint64_t use_mask;        // Resource mask for use.
  uint64_t def_mask;        // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;          // Chain of LIR nodes needing pc-relative fixups.
};

struct LIR {
  CodeOffset offset;              // Offset of this instruction.
  NarrowDexOffset dalvik_offset;  // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;   // For Dalvik register disambiguation.
    bool is_nop:1;                // LIR is optimized away.
    unsigned int size:4;          // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;       // If true, masks should not be used.
    unsigned int generation:1;    // Used to track visitation state during fixup pass.
    unsigned int fixup:8;         // Fixup kind.
  } flags;
  union {
    UseDefMasks m;                // Use & Def masks used during optimization.
    AssemblyInfo a;               // Instruction info used during assembly phase.
  } u;
  int32_t operands[5];            // [0..4] = [dest, src1, src2, extra, extra2].
};

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                            ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
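
// Editor's illustration (not in the upstream header): a compile-time sanity
// sketch of the alias_info encoding.  Encoding Dalvik v5 as wide sets bit 16,
// and the decode macros recover both fields.
static_assert(ENCODE_ALIAS_INFO(5, true) == 0x10005, "alias_info encode");
static_assert(DECODE_ALIAS_INFO_REG(0x10005) == 5, "alias_info reg decode");
static_assert(DECODE_ALIAS_INFO_WIDE(0x10005) == 1, "alias_info wide decode");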

// Common resource macros.
#define ENCODE_CCODE            (1ULL << kCCode)
#define ENCODE_FP_STATUS        (1ULL << kFPStatus)

// Abstract memory locations.
#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
#define ENCODE_LITERAL          (1ULL << kLiteral)
#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)

#define ENCODE_ALL              (~0ULL)
#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)

#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
  do { \
    low_reg = both_regs & 0xff; \
    high_reg = (both_regs >> 8) & 0xff; \
  } while (false)
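
// Editor's illustration (not in the upstream header): packing r1 (low) and
// r2 (high) yields 0x201; DECODE_REG_PAIR recovers 1 and 2 from that value.
static_assert(ENCODE_REG_PAIR(1, 2) == 0x201, "reg pair encode");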

// Mask to denote sreg as the start of a double.  Must not interfere with low 16 bits.
#define STARTING_DOUBLE_SREG 0x10000

// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))

class Mir2Lir : public Backend {
  public:
    /*
     * Auxiliary information describing the location of data embedded in the Dalvik
     * byte code stream.
     */
    struct EmbeddedData {
      CodeOffset offset;        // Code offset of data block.
      const uint16_t* table;    // Original dex data.
      DexOffset vaddr;          // Dalvik offset of parent opcode.
    };

    struct FillArrayData : EmbeddedData {
      int32_t size;
    };

    struct SwitchTable : EmbeddedData {
      LIR* anchor;              // Reference instruction for relative offsets.
      LIR** targets;            // Array of case targets.
    };

    /* Static register use counts */
    struct RefCounts {
      int count;
      int s_reg;
    };

    /*
     * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
     * and native register storage.  The primary purpose is to reuse previously
     * loaded values, if possible, and otherwise to keep the value in register
     * storage as long as possible.
     *
     * NOTE 1: wide_value refers to the width of the Dalvik value contained in
     * this register (or pair).  For example, a 64-bit register containing a 32-bit
     * Dalvik value would have wide_value==false even though the storage container itself
     * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
     * would have wide_value==true (and additionally would have its partner field set to the
     * other half, whose wide_value field would also be true).
     *
     * NOTE 2: In the case of a register pair, you can determine which of the partners
     * is the low half by looking at the s_reg names.  The high s_reg will equal low_sreg + 1.
     *
     * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
     * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
     * value, and the s_reg of the high word is implied (s_reg + 1).
     *
     * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false no
     * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
     * If is_temp==true and live==false, no other fields have
     * meaning.  If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
     * and def_end describe the relationship between the temp register/register pair and
     * the Dalvik value[s] described by s_reg/s_reg+1.
     *
     * The fields used_storage, master_storage and storage_mask are used to track allocation
     * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
     * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
     * storage use.  For s4, it would be 0x00000001; for s5, 0x00000002.  These values should
     * not change once initialized.  The "used_storage" field tracks current allocation status.
     * Although each record contains this field, only the field from the largest member of
     * an aliased group is used.  In our case, it would be d2's.  The master_storage pointer
     * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in a used_storage
     * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
     * Then, if we wanted to determine whether s4 could be allocated, we would "and"
     * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free, and
     * to allocate: *master_storage |= storage_mask.  To free: *master_storage &= ~storage_mask.
     *
     * For an X86 vector register example, storage_mask would be:
     *    0x00000001 for 32-bit view of xmm1
     *    0x00000003 for 64-bit view of xmm1
     *    0x0000000f for 128-bit view of xmm1
     *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
     *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
     *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
     *
     * The "liveness" of a register is handled in a similar way.  The liveness_ storage is
     * held in the widest member of an aliased set.  Note, though, that for a temp register
     * to be reused as live, it must both be marked live and the associated SReg() must match
     * the desired s_reg.  This gets a little complicated when dealing with aliased registers.
     * All members of an aliased set will share the same liveness flags, but each will
     * individually maintain s_reg_.  In this way we can know that at least one member of an
     * aliased set is live, but will only fully match on the appropriate alias view.  For
     * example, if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also
     * implies v9 because it is wide), its aliases s2 and s3 will show as live, but will have
     * s_reg_ == INVALID_SREG.  An attempt to later AllocLiveReg() of v9 with a single-precision
     * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
     * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
     * report that v9 is currently not live as a single (which is what we want).
     *
     * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
     * to treat xmm registers:
     *     1. Treat them all as 128-bits wide, but denote how much data is used via bytes field.
     *         o This more closely matches reality, but means you'd need to be able to get
     *           to the associated RegisterInfo struct to figure out how it's being used.
     *         o This is how 64-bit core registers will be used - always 64 bits, but the
     *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
     *     2. View the xmm registers based on contents.
     *         o A single in an xmm2 register would be k32BitVector, while a double in xmm2
     *           would be a k64BitVector.
     *         o Note that the two uses above would be considered distinct registers (but with
     *           the aliasing mechanism, we could detect interference).
     *         o This is how aliased double and single float registers will be handled on
     *           Arm and MIPS.
     * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
     * mechanism 2 for aliased float registers and x86 vector registers.
     */
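    // Editor's worked sketch of the bookkeeping described above (illustration
    // only, using the Arm d2/s4/s5 example): with storage masks d2=0x3, s4=0x1
    // and s5=0x2, all three records share d2's used_storage_ via master_:
    //   s4 free?   (s4.storage_mask_ & *master_storage) == 0
    //   alloc s4:  *master_storage |= s4.storage_mask_;   // d2 now reads as InUse().
    //   free s4:   *master_storage &= ~s4.storage_mask_;  // s5 may still be allocated.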
    class RegisterInfo {
     public:
      RegisterInfo(RegStorage r, uint64_t mask = ENCODE_ALL);
      ~RegisterInfo() {}
      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->Alloc(size, kArenaAllocRegAlloc);
      }

      bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
      void MarkInUse() { master_->used_storage_ |= storage_mask_; }
      void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
      // No part of the containing storage is live in this view.
      bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
      // Liveness of this view matches.  Note: not equivalent to !IsDead().
      bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
      void MarkLive() { master_->liveness_ |= storage_mask_; }
      void MarkDead() {
        master_->liveness_ &= ~storage_mask_;
        SetSReg(INVALID_SREG);
      }
      RegStorage GetReg() { return reg_; }
      void SetReg(RegStorage reg) { reg_ = reg; }
      bool IsTemp() { return is_temp_; }
      void SetIsTemp(bool val) { is_temp_ = val; }
      bool IsWide() { return wide_value_; }
      void SetIsWide(bool val) { wide_value_ = val; }
      bool IsDirty() { return dirty_; }
      void SetIsDirty(bool val) { dirty_ = val; }
      RegStorage Partner() { return partner_; }
      void SetPartner(RegStorage partner) { partner_ = partner; }
      int SReg() { return s_reg_; }
      void SetSReg(int s_reg) { s_reg_ = s_reg; }
      uint64_t DefUseMask() { return def_use_mask_; }
      void SetDefUseMask(uint64_t def_use_mask) { def_use_mask_ = def_use_mask; }
      RegisterInfo* Master() { return master_; }
      void SetMaster(RegisterInfo* master) {
        master_ = master;
        if (master != this) {
          master_->aliased_ = true;
          DCHECK(alias_chain_ == nullptr);
          alias_chain_ = master_->alias_chain_;
          master_->alias_chain_ = this;
        }
      }
      bool IsAliased() { return aliased_; }
      RegisterInfo* GetAliasChain() { return alias_chain_; }
      uint32_t StorageMask() { return storage_mask_; }
      void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
      LIR* DefStart() { return def_start_; }
      void SetDefStart(LIR* def_start) { def_start_ = def_start; }
      LIR* DefEnd() { return def_end_; }
      void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
      void ResetDefBody() { def_start_ = def_end_ = nullptr; }

     private:
      RegStorage reg_;
      bool is_temp_;               // Can allocate as temp?
      bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
      bool dirty_;                 // If live, is it dirty?
      bool aliased_;               // Is this the master for other aliased RegisterInfo's?
      RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
      int s_reg_;                  // Name of live value.
      uint64_t def_use_mask_;      // Resources for this element.
      uint32_t used_storage_;      // 1 bit per 4 bytes of storage.  Unused by aliases.
      uint32_t liveness_;          // 1 bit per 4 bytes of storage.  Unused by aliases.
      RegisterInfo* master_;       // Pointer to controlling storage mask.
      uint32_t storage_mask_;      // Track allocation of sub-units.
      LIR *def_start_;             // Starting inst in last def sequence.
      LIR *def_end_;               // Ending inst in last def sequence.
      RegisterInfo* alias_chain_;  // Chain of aliased registers.
    };
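
    // Editor's note (illustration, not upstream): IsLive() and IsDead() are
    // deliberately asymmetric.  For Arm d0 aliasing s0/s1, marking only s0
    // live leaves d0 with IsDead() == false (some of its storage is live) and
    // IsLive() == false (liveness does not cover d0's whole storage_mask_).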

    class RegisterPool {
     public:
      RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena, const std::vector<RegStorage>& core_regs,
                   const std::vector<RegStorage>& sp_regs, const std::vector<RegStorage>& dp_regs,
                   const std::vector<RegStorage>& reserved_regs,
                   const std::vector<RegStorage>& core_temps,
                   const std::vector<RegStorage>& sp_temps,
                   const std::vector<RegStorage>& dp_temps);
      ~RegisterPool() {}
      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->Alloc(size, kArenaAllocRegAlloc);
      }
      void ResetNextTemp() {
        next_core_reg_ = 0;
        next_sp_reg_ = 0;
        next_dp_reg_ = 0;
      }
      GrowableArray<RegisterInfo*> core_regs_;
      int next_core_reg_;
      GrowableArray<RegisterInfo*> sp_regs_;    // Single precision float.
      int next_sp_reg_;
      GrowableArray<RegisterInfo*> dp_regs_;    // Double precision float.
      int next_dp_reg_;

     private:
      Mir2Lir* const m2l_;
    };

    struct PromotionMap {
      RegLocationType core_location:3;
      uint8_t core_reg;
      RegLocationType fp_location:3;
      uint8_t FpReg;
      bool first_in_pair;
    };

    //
    // Slow paths.  This object is used to generate a sequence of code that is executed in the
    // slow path.  For example, resolving a string or class is slow, but it will only be
    // executed once (after that it is resolved and doesn't need to be done again).  We want
    // slow paths to be placed out-of-line, and not require a (probably mispredicted)
    // conditional forward branch over them.
    //
    // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
    // the Compile() function that will be called near the end of the code generated by the
    // method.
    //
    // The basic flow for a slow path is:
    //
    //     CMP reg, #value
    //     BEQ fromfast
    //   cont:
    //     ...
    //     fast path code
    //     ...
    //     more code
    //     ...
    //     RETURN
    //
    //   fromfast:
    //     ...
    //     slow path code
    //     ...
    //     B cont
    //
    // So you see we need two labels and two branches.  The first branch (called fromfast) is
    // the conditional branch to the slow path code.  The second label (called cont) is used
    // as an unconditional branch target for getting back to the code after the slow path
    // has completed.
    //
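    // Editor's sketch (hypothetical subclass, not from this file) of the
    // pattern described above; concrete examples live in the target codegens:
    //
    //   class MySlowPath : public LIRSlowPath {
    //    public:
    //     MySlowPath(Mir2Lir* m2l, DexOffset dexpc, LIR* fromfast, LIR* cont)
    //         : LIRSlowPath(m2l, dexpc, fromfast, cont) {}
    //     void Compile() {
    //       GenerateTargetLabel();                // Emits the "fromfast:" label.
    //       ... emit the out-of-line work ...
    //       m2l_->OpUnconditionalBranch(cont_);   // Branch back to "cont:".
    //     }
    //   };
    //
    // The fast path then registers it with AddSlowPath(new (arena) MySlowPath(...)).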

    class LIRSlowPath {
     public:
      LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                  LIR* cont = nullptr) :
        m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
      }
      virtual ~LIRSlowPath() {}
      virtual void Compile() = 0;

      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->Alloc(size, kArenaAllocData);
      }

     protected:
      LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);

      Mir2Lir* const m2l_;
      CompilationUnit* const cu_;
      const DexOffset current_dex_pc_;
      LIR* const fromfast_;
      LIR* const cont_;
    };

    virtual ~Mir2Lir() {}

    int32_t s4FromSwitchData(const void* switch_data) {
      return *reinterpret_cast<const int32_t*>(switch_data);
    }

    /*
     * TODO: this is a trace JIT vestige, and its use should be reconsidered.  At the time
     * it was introduced, it was intended to be a quick best guess of type without having to
     * take the time to do type analysis.  Currently, though, we have a much better idea of
     * the types of Dalvik virtual registers.  Instead of using this for a best guess, why not
     * just use our knowledge of type to select the most appropriate register class?
     */
    RegisterClass RegClassBySize(OpSize size) {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }

    size_t CodeBufferSizeInBytes() {
      return code_buffer_.size() / sizeof(code_buffer_[0]);
    }

    static bool IsPseudoLirOp(int opcode) {
      return (opcode < 0);
    }

    /*
     * LIR operands are 32-bit integers.  Sometimes, (especially for managing
     * instructions which require PC-relative fixups), we need the operands to carry
     * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
     * hold that index in the operand array.
     * TUNING: If use of these utilities becomes more common on 32-bit builds, it
     * may be worth conditionally-compiling a set of identity functions here.
     */
    uint32_t WrapPointer(void* pointer) {
      uint32_t res = pointer_storage_.Size();
      pointer_storage_.Insert(pointer);
      return res;
    }

    void* UnwrapPointer(size_t index) {
      return pointer_storage_.Get(index);
    }
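
    // Editor's illustration (hypothetical call sequence, not from this file):
    // code that must carry a pointer in a 32-bit LIR operand stores the index,
    // not the pointer:
    //   uint32_t idx = WrapPointer(tab_rec);   // stash the pointer
    //   ... place idx in an LIR operand ...
    //   SwitchTable* t = static_cast<SwitchTable*>(UnwrapPointer(idx));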

    // strdup(), but allocates from the arena.
    char* ArenaStrdup(const char* str) {
      size_t len = strlen(str) + 1;
      char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
      if (res != NULL) {
        strncpy(res, str, len);
      }
      return res;
    }

    // Shared by all targets - implemented in codegen_util.cc
    void AppendLIR(LIR* lir);
    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

    /**
     * @brief Provides the maximum number of compiler temporaries that the backend can/wants
     * to place in a frame.
     * @return Returns the maximum number of compiler temporaries.
     */
    size_t GetMaxPossibleCompilerTemps() const;

    /**
     * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
     * @return Returns the size in bytes for space needed for compiler temporary spill region.
     */
    size_t GetNumBytesForCompilerTempSpillRegion();

    DexOffset GetCurrentDexPc() const {
      return current_dalvik_offset_;
    }

    int ComputeFrameSize();
    virtual void Materialize();
    virtual CompiledMethod* GetCompiledMethod();
    void MarkSafepointPC(LIR* inst);
    void SetupResourceMasks(LIR* lir);
    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
    void SetupRegMask(uint64_t* mask, int reg);
    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
    void DumpPromotionMap();
    void CodegenDump();
    LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
    LIR* NewLIR0(int opcode);
    LIR* NewLIR1(int opcode, int dest);
    LIR* NewLIR2(int opcode, int dest, int src1);
    LIR* NewLIR2NoDest(int opcode, int src, int info);
    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
    LIR* AddWordData(LIR* *constant_list_p, int value);
    LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
    void ProcessSwitchTables();
    void DumpSparseSwitchTable(const uint16_t* table);
    void DumpPackedSwitchTable(const uint16_t* table);
    void MarkBoundary(DexOffset offset, const char* inst_str);
    void NopLIR(LIR* lir);
    void UnlinkLIR(LIR* lir);
    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
    bool IsInexpensiveConstant(RegLocation rl_src);
    ConditionCode FlipComparisonOrder(ConditionCode before);
    ConditionCode NegateComparison(ConditionCode before);
    virtual void InstallLiteralPools();
    void InstallSwitchTables();
    void InstallFillArrayData();
    bool VerifyCatchEntries();
    void CreateMappingTables();
    void CreateNativeGcMap();
    int AssignLiteralOffset(CodeOffset offset);
    int AssignSwitchTablesOffset(CodeOffset offset);
    int AssignFillArrayDataOffset(CodeOffset offset);
    LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
    void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
    void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
    // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code
    // generated.
    RegLocation NarrowRegLoc(RegLocation loc);

    // Shared by all targets - implemented in local_optimizations.cc
    void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
    void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);

    // Shared by all targets - implemented in ralloc_util.cc
    int GetSRegHi(int lowSreg);
    bool LiveOut(int s_reg);
    void SimpleRegAlloc();
    void ResetRegPool();
    void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
    void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
    void DumpCoreRegPool();
    void DumpFpRegPool();
    void DumpRegPools();
    /* Mark a temp register as dead.  Does not affect allocation state. */
    void Clobber(RegStorage reg);
    void ClobberSReg(int s_reg);
    void ClobberAliases(RegisterInfo* info);
    int SRegToPMap(int s_reg);
    void RecordCorePromotion(RegStorage reg, int s_reg);
    RegStorage AllocPreservedCoreReg(int s_reg);
    void RecordSinglePromotion(RegStorage reg, int s_reg);
    void RecordDoublePromotion(RegStorage reg, int s_reg);
    RegStorage AllocPreservedSingle(int s_reg);
    virtual RegStorage AllocPreservedDouble(int s_reg);
    RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
    RegStorage AllocFreeTemp();
    RegStorage AllocTemp();
    RegStorage AllocTempSingle();
    RegStorage AllocTempDouble();
    void FlushReg(RegStorage reg);
    void FlushRegWide(RegStorage reg);
    RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
    RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
    void FreeTemp(RegStorage reg);
    bool IsLive(RegStorage reg);
    bool IsTemp(RegStorage reg);
    bool IsPromoted(RegStorage reg);
    bool IsDirty(RegStorage reg);
    void LockTemp(RegStorage reg);
    void ResetDef(RegStorage reg);
    void NullifyRange(RegStorage reg, int s_reg);
    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
    RegLocation WideToNarrow(RegLocation rl);
    void ResetDefLoc(RegLocation rl);
    void ResetDefLocWide(RegLocation rl);
    void ResetDefTracking();
    void ClobberAllTemps();
    void FlushSpecificReg(RegisterInfo* info);
    void FlushAllRegs();
    bool RegClassMatches(int reg_class, RegStorage reg);
    void MarkLive(RegLocation loc);
    void MarkLiveReg(RegStorage reg, int s_reg);
    void MarkTemp(RegStorage reg);
    void UnmarkTemp(RegStorage reg);
    void MarkWide(RegStorage reg);
    void MarkClean(RegLocation loc);
    void MarkDirty(RegLocation loc);
    void MarkInUse(RegStorage reg);
    bool CheckCorePoolSanity();
    RegLocation UpdateLoc(RegLocation loc);
    RegLocation UpdateLocWide(RegLocation loc);
    RegLocation UpdateRawLoc(RegLocation loc);

    /**
     * @brief Used to prepare a register location to receive a wide value.
     * @see EvalLoc
     * @param loc the location where the value will be stored.
     * @param reg_class Type of register needed.
     * @param update Whether the liveness information should be updated.
     * @return Returns the properly typed temporary in physical register pairs.
     */
    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);

    /**
     * @brief Used to prepare a register location to receive a value.
     * @param loc the location where the value will be stored.
     * @param reg_class Type of register needed.
     * @param update Whether the liveness information should be updated.
     * @return Returns the properly typed temporary in physical register.
     */
    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);

    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
    void DumpCounts(const RefCounts* arr, int size, const char* msg);
    void DoPromotion();
    int VRegOffset(int v_reg);
    int SRegOffset(int s_reg);
    RegLocation GetReturnWide(bool is_double);
    RegLocation GetReturn(bool is_float);
    RegisterInfo* GetRegInfo(RegStorage reg);

    // Shared by all targets - implemented in gen_common.cc.
    void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
    bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                          RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
    void HandleSlowPaths();
    void GenBarrier();
    void GenDivZeroException();
    // c_code holds condition code that's generated from testing divisor against 0.
    void GenDivZeroCheck(ConditionCode c_code);
    // reg holds divisor.
    void GenDivZeroCheck(RegStorage reg);
    void GenArrayBoundsCheck(RegStorage index, RegStorage length);
    void GenArrayBoundsCheck(int32_t index, RegStorage length);
    LIR* GenNullCheck(RegStorage reg);
    void MarkPossibleNullPointerException(int opt_flags);
    void MarkPossibleStackOverflowException();
    void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
    LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
    LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
    LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                                 LIR* taken, LIR* fall_through);
    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src);
    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                     RegLocation rl_src);
    void GenFilledNewArray(CallInfo* info);
    void GenSput(MIR* mir, RegLocation rl_src,
                 bool is_long_or_double, bool is_object);
    void GenSget(MIR* mir, RegLocation rl_dest,
                 bool is_long_or_double, bool is_object);
    void GenIGet(MIR* mir, int opt_flags, OpSize size,
                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenIPut(MIR* mir, int opt_flags, OpSize size,
                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                        RegLocation rl_src);

    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
    void GenThrow(RegLocation rl_src);
    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_shift);
    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src, int lit);
    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_src2);
    template <size_t pointer_size>
    void GenConversionCall(ThreadOffset<pointer_size> func_offset, RegLocation rl_dest,
                           RegLocation rl_src);
    void GenSuspendTest(int opt_flags);
    void GenSuspendTestAndBranch(int opt_flags, LIR* target);

    // This will be overridden by x86 implementation.
    virtual void GenConstWide(RegLocation rl_dest, int64_t value);
    virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src1, RegLocation rl_src2);

    // Shared by all targets - implemented in gen_invoke.cc.
    template <size_t pointer_size>
    LIR* CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset, bool safepoint_pc,
                    bool use_link = true);
    RegStorage CallHelperSetup(ThreadOffset<4> helper_offset);
    RegStorage CallHelperSetup(ThreadOffset<8> helper_offset);
    template <size_t pointer_size>
    void CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
                              bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                              bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
                                      bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
                                         RegLocation arg1, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
                                         RegLocation arg0, int arg1, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
                                 RegStorage arg1, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                 int arg1, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
                                    bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                    bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
                                               RegStorage arg0, RegLocation arg2,
                                               bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
                                                 RegLocation arg0, RegLocation arg1,
                                                 bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                 RegStorage arg1, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                    RegStorage arg1, int arg2, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
                                       int arg2, bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
                                                    int arg0, RegLocation arg1, RegLocation arg2,
                                                    bool safepoint_pc);
    template <size_t pointer_size>
    void CallRuntimeHelperRegLocationRegLocationRegLocation(
        ThreadOffset<pointer_size> helper_offset, RegLocation arg0, RegLocation arg1,
        RegLocation arg2, bool safepoint_pc);
    void GenInvoke(CallInfo* info);
    void GenInvokeNoInline(CallInfo* info);
    virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                             NextCallInsn next_call_insn,
                             const MethodReference& target_method,
                             uint32_t vtable_idx,
                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                             bool skip_this);
    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                           NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx,
                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                           bool skip_this);

    /**
     * @brief Used to determine the register location of the destination.
     * @details This is needed during generation of inline intrinsics because it finds the
     * destination of the return, either the physical register or the target of move-result.
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTarget(CallInfo* info);

    /**
     * @brief Used to determine the wide register location of the destination.
     * @see InlineTarget
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTargetWide(CallInfo* info);

    bool GenInlinedCharAt(CallInfo* info);
    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
    bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
    bool GenInlinedAbsInt(CallInfo* info);
    bool GenInlinedAbsLong(CallInfo* info);
    bool GenInlinedAbsFloat(CallInfo* info);
    bool GenInlinedAbsDouble(CallInfo* info);
    bool GenInlinedFloatCvt(CallInfo* info);
    bool GenInlinedDoubleCvt(CallInfo* info);
    virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
    bool GenInlinedStringCompareTo(CallInfo* info);
    bool GenInlinedCurrentThread(CallInfo* info);
    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                             bool is_volatile, bool is_ordered);
    virtual int LoadArgRegs(CallInfo* info, int call_state,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                            bool skip_this);

    // Shared by all targets - implemented in gen_loadstore.cc.
    RegLocation LoadCurrMethod();
    void LoadCurrMethodDirect(RegStorage r_tgt);
    LIR* LoadConstant(RegStorage r_dest, int value);
    // Natural word size.
    LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, kWord);
    }
    // Load 32 bits, regardless of target.
    LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, k32);
    }
    // Load a reference at base + displacement and decompress into register.
    LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, kReference);
    }
    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
    RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
    // Load Dalvik value with 64-bit memory storage.
    RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
    void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
    void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
    // Load Dalvik value with 64-bit memory storage.
    void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
    // Load Dalvik value with 64-bit memory storage.
    void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
    // Store an item of natural word size.
    LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
      return StoreBaseDisp(r_base, displacement, r_src, kWord);
    }
    // Store an uncompressed reference into a compressed 32-bit container.
    LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src) {
      return StoreBaseDisp(r_base, displacement, r_src, kReference);
    }
    // Store 32 bits, regardless of target.
    LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
      return StoreBaseDisp(r_base, displacement, r_src, k32);
    }

    /**
     * @brief Used to do the final store in the destination as per bytecode semantics.
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location.  Can be either physical register or dalvik
     * register.
     */
    void StoreValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location.  Can be either physical register or dalvik
     * register.
     */
    void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store to a destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location.  It must be kLocPhysReg.
     *
     * This is used for x86 two-operand computations, where we have computed the correct
     * register value that now needs to be properly registered.  This is used to avoid an
     * extra register copy that would result if StoreValue was called.
     */
    void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValueWide
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location.  It must be kLocPhysReg.
     *
     * This is used for x86 two-operand computations, where we have computed the correct
     * register values that now need to be properly registered.  This is used to avoid an
     * extra pair of register copies that would result if StoreValueWide was called.
     */
    void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);
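
    // Editor's illustration (hypothetical x86-style sequence, not from this
    // file): after a two-operand ADD computes the result in place, rl_result
    // is already kLocPhysReg, so
    //   StoreFinalValue(rl_dest, rl_result);
    // records it without the extra register copy StoreValue() would emit.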

    // Shared by all targets - implemented in mir_to_lir.cc.
    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
    void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    bool MethodBlockCodeGen(BasicBlock* bb);
    bool SpecialMIR2LIR(const InlineMethod& special);
    void MethodMIR2LIR();
    // Update LIR for verbose listings.
    void UpdateLIROffsets();

    /*
     * @brief Load the address of the dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param symbolic_reg Symbolic register that will contain the code address.
     * @note symbolic_reg will be passed to TargetReg to get the physical register.
     */
    void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                         SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Method* of a dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param symbolic_reg Symbolic register that will contain the method address.
     * @note symbolic_reg will be passed to TargetReg to get the physical register.
     */
    virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Class* of a Dex Class type into the register.
     * @param type_idx The type index of the class to load.
     * @param symbolic_reg Symbolic register that will contain the class address.
     * @note symbolic_reg will be passed to TargetReg to get the physical register.
     */
    virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);

    // Routines that work for the generic case, but may be overridden by target.
    /*
     * @brief Compare memory to immediate, and branch if condition true.
     * @param cond The condition code that when true will branch to the target.
     * @param temp_reg A temporary register that can be used if compare to memory is not
     * supported by the architecture.
     * @param base_reg The register holding the base address.
     * @param offset The offset from the base.
     * @param check_value The immediate to compare to.
     * @returns The branch instruction that was generated.
     */
    virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target);

    // Required for target - codegen helpers.
    virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual LIR* CheckSuspendUsingLoad() = 0;

    virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
    virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0;

    virtual LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
                                      OpSize size) = 0;
    virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size) = 0;
    virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) = 0;
    virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, OpSize size) = 0;
    virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
    virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
    virtual LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
                                       OpSize size) = 0;
    virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                               OpSize size) = 0;
    virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) = 0;
    virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size) = 0;
    virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;

    // Required for target - register utilities.
    virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class) = 0;
    virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
    virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;
    virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
    virtual RegLocation GetReturnAlt() = 0;
    virtual RegLocation GetReturnWideAlt() = 0;
    virtual RegLocation LocCReturn() = 0;
    virtual RegLocation LocCReturnDouble() = 0;
    virtual RegLocation LocCReturnFloat() = 0;
    virtual RegLocation LocCReturnWide() = 0;
    virtual uint64_t GetRegMaskCommon(RegStorage reg) = 0;
    virtual void AdjustSpillMask() = 0;
    virtual void ClobberCallerSave() = 0;
    virtual void FreeCallTemps() = 0;
    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
    virtual void LockCallTemps() = 0;
    virtual void MarkPreservedSingle(int v_reg, RegStorage reg) = 0;
    virtual void MarkPreservedDouble(int v_reg, RegStorage reg) = 0;
    virtual void CompilerInitializeRegAlloc() = 0;

    // Required for target - miscellaneous.
    virtual void AssembleLIR() = 0;
    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags) = 0;
    virtual const char* GetTargetInstFmt(int opcode) = 0;
    virtual const char* GetTargetInstName(int opcode) = 0;
    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
    virtual uint64_t GetPCUseDefEncoding() = 0;
    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
    virtual int GetInsnSize(LIR* lir) = 0;
    virtual bool IsUnconditionalBranch(LIR* lir) = 0;

    // Check support for volatile load/store of a given size.
    virtual bool SupportsVolatileLoadStore(OpSize size) = 0;
    // Get the register class for load/store of a field.
    virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;

    // Required for target - Dalvik-level generators.
    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenMulLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAddLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAndLong(Instruction::Code,
                            RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2) = 0;
    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) = 0;
    virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;

    /**
     * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
     * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
     * that applies on integers.  The generated code will write the smallest or largest value
     * directly into the destination register as specified by the invoke information.
     * @param info Information about the invoke.
     * @param is_min If true generates code that computes minimum.  Otherwise computes maximum.
     * @return Returns true if successfully generated.
     */
    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0;

    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) = 0;
    virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
                                     bool is_div) = 0;
    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param check_zero 'true' if an exception should be generated if the divisor is 0.
     */
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) = 0;
    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                     bool is_div) = 0;
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;

    /**
     * @brief Used for generating code that throws ArithmeticException if both registers are zero.
     * @details This is used for generating DivideByZero checks when the divisor is held in two
     * separate registers.
     * @param reg The register holding the pair of 32-bit values.
     */
    virtual void GenDivZeroCheckWide(RegStorage reg) = 0;

    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Lowers the kMirOpSelect MIR into LIR.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirOpSelect.
     */
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Used to generate a memory barrier in an architecture-specific way.
     * @details The last generated LIR will be considered for use as barrier.  Namely,
     * if the last LIR can be updated in a way where it will serve the semantics of
     * barrier, then it will be used as such.  Otherwise, a new LIR will be generated
     * that can keep the semantics.
     * @param barrier_kind The kind of memory barrier to generate.
     */
    virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;

    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale,
                             bool card_mark) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift) = 0;

    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2,
                             LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
    virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual void OpEndIT(LIR* it) = 0;
    virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
    virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
    virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
    virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
    virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

    /**
     * @brief Used to generate an LIR that does a load from mem to reg.
     * @param r_dest The destination physical register.
     * @param r_base The base physical register for memory operand.
     * @param offset The displacement for memory operand.
     * @param move_type Specification on the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                             MoveType move_type) = 0;

    /**
     * @brief Used to generate an LIR that does a store from reg to mem.
     * @param r_base The base physical register for memory operand.
     * @param offset The displacement for memory operand.
     * @param r_src The source physical register.
     * @param move_type Specification on the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                             MoveType move_type) = 0;

    /**
     * @brief Used for generating a conditional register-to-register operation.
     * @param op The opcode kind.
     * @param cc The condition code that when true will perform the opcode.
     * @param r_dest The destination physical register.
     * @param r_src The source physical register.
     * @return Returns the newly created LIR or null in case of creation failure.
     */

  virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
  virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                           RegStorage r_src2) = 0;
  virtual LIR* OpTestSuspend(LIR* target) = 0;
  virtual LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) = 0;
  virtual LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) = 0;
  virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
  virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
  virtual void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                     int offset) = 0;
  virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
  virtual void OpTlsCmp(ThreadOffset<4> offset, int val) = 0;
  virtual void OpTlsCmp(ThreadOffset<8> offset, int val) = 0;
  virtual bool InexpensiveConstantInt(int32_t value) = 0;
  virtual bool InexpensiveConstantFloat(int32_t value) = 0;
  virtual bool InexpensiveConstantLong(int64_t value) = 0;
  virtual bool InexpensiveConstantDouble(int64_t value) = 0;

  // May be optimized by targets.
  virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
  virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

  // Temp workaround
  void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);

 protected:
  Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

  CompilationUnit* GetCompilationUnit() {
    return cu_;
  }
  /*
   * @brief Returns the index of the lowest set bit in 'x'.
   * @param x Value to be examined.
   * @returns The bit number of the lowest bit set in the value.
   */
  int32_t LowestSetBit(uint64_t x);
  /*
   * @brief Is this value a power of two?
   * @param x Value to be examined.
   * @returns 'true' if only one bit is set in the value.
   */
  bool IsPowerOfTwo(uint64_t x);
  /*
   * @brief Do these SRs overlap?
   * @param rl_op1 One RegLocation
   * @param rl_op2 The other RegLocation
   * @return 'true' if the VR pairs overlap
   *
   * Check to see if a result pair has a misaligned overlap with an operand pair. This
   * is not usual for dx to generate, but it is legal (for now). In a future rev of
   * dex, we'll want to make this case illegal.
   */
  bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);

  /*
   * @brief Force a location (in a register) into a temporary register.
   * @param loc location of result
   * @returns updated location
   */
  RegLocation ForceTemp(RegLocation loc);

  /*
   * @brief Force a wide location (in registers) into temporary registers.
   * @param loc location of result
   * @returns updated location
   */
  RegLocation ForceTempWide(RegLocation loc);

  static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
    return wide ? k64 : ref ? kReference : k32;
  }
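
  /*
   * Illustrative sketch (an assumed implementation, shown only to make the expected
   * semantics of the two bit helpers above concrete; the real definitions live in the
   * corresponding .cc file):
   * @code
   *   int32_t LowestSetBit(uint64_t x) {  // Assumes x != 0; otherwise the loop never ends.
   *     int32_t bit_posn = 0;
   *     while ((x & 1) == 0) {
   *       bit_posn++;
   *       x >>= 1;
   *     }
   *     return bit_posn;
   *   }
   *
   *   bool IsPowerOfTwo(uint64_t x) {
   *     return x != 0 && (x & (x - 1)) == 0;  // Exactly one bit set.
   *   }
   * @endcode
   * Likewise, LoadStoreOpSize(false, true) above evaluates to kReference.
   */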

  virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                  RegLocation rl_dest, RegLocation rl_src);

  void AddSlowPath(LIRSlowPath* slowpath);

  virtual void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                          bool type_known_abstract, bool use_declaring_class,
                                          bool can_assume_type_is_in_dex_cache,
                                          uint32_t type_idx, RegLocation rl_dest,
                                          RegLocation rl_src);
  /*
   * @brief Generate the debug_frame FDE information if possible.
   * @returns pointer to vector containing CFE information, or NULL.
   */
  virtual std::vector<uint8_t>* ReturnCallFrameInformation();

  /**
   * @brief Used to insert a marker that can be used to associate MIR with LIR.
   * @details Only inserts the marker if verbosity is enabled.
   * @param mir The mir that is currently being generated.
   */
  void GenPrintLabel(MIR* mir);

  /**
   * @brief Used to generate the return sequence when there is no frame.
   * @details Assumes that the return registers have already been populated.
   */
  virtual void GenSpecialExitSequence() = 0;

  /**
   * @brief Used to generate code for special methods that are known to be
   * small enough to work in frameless mode.
   * @param bb The basic block of the first MIR.
   * @param mir The first MIR of the special method.
   * @param special Information about the special method.
   * @return Returns whether or not this was handled successfully. Returns false
   * if the caller should punt to normal MIR2LIR conversion.
   */
  virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

 private:
  void ClobberBody(RegisterInfo* p);
  void SetCurrentDexPc(DexOffset dexpc) {
    current_dalvik_offset_ = dexpc;
  }

  /**
   * @brief Used to lock a register if the argument at in_position was passed that way.
   * @details Does nothing if the argument is passed via the stack.
   * @param in_position The argument number whose register to lock.
   * @param wide Whether the argument is wide.
   */
  void LockArg(int in_position, bool wide = false);

  /**
   * @brief Used to load a VR argument to a physical register.
   * @details The load is only done if the argument is not already in a physical register.
   * LockArg must have been previously called.
   * @param in_position The argument number to load.
   * @param wide Whether the argument is 64-bit or not.
   * @return Returns the register (or register pair) for the loaded argument.
   */
  RegStorage LoadArg(int in_position, bool wide = false);

  /**
   * @brief Used to load a VR argument directly to a specified register location.
   * @param in_position The argument number to place in a register.
   * @param rl_dest The register location where to place the argument.
   */
  void LoadArgDirect(int in_position, RegLocation rl_dest);
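
  /*
   * Illustrative usage sketch (assumed, condensed from the comments above rather than
   * copied from the implementation): a special getter would typically pin the "this"
   * argument before loading it, so the load can be elided when the value is already
   * in its incoming register:
   * @code
   *   LockArg(0);                       // Reserve the incoming register, if any.
   *   RegStorage reg_obj = LoadArg(0);  // Loads from the stack only when needed.
   * @endcode
   */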

  /**
   * @brief Used to generate LIR for a special getter method.
   * @param mir The mir that represents the iget.
   * @param special Information about the special getter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIGet(MIR* mir, const InlineMethod& special);

  /**
   * @brief Used to generate LIR for a special setter method.
   * @param mir The mir that represents the iput.
   * @param special Information about the special setter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

  /**
   * @brief Used to generate LIR for a special return-args method.
   * @param mir The mir that represents the return of an argument.
   * @param special Information about the special return-args method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

  void AddDivZeroCheckSlowPath(LIR* branch);

  // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
  // kArg2 as a temp.
  void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

 public:
  // TODO: add accessors for these.
  LIR* literal_list_;                        // Constants.
  LIR* method_literal_list_;                 // Method literals requiring patching.
  LIR* class_literal_list_;                  // Class literals requiring patching.
  LIR* code_literal_list_;                   // Code literals requiring patching.
  LIR* first_fixup_;                         // Doubly-linked list of LIR nodes requiring fixups.

 protected:
  CompilationUnit* const cu_;
  MIRGraph* const mir_graph_;
  GrowableArray<SwitchTable*> switch_tables_;
  GrowableArray<FillArrayData*> fill_array_data_;
  GrowableArray<RegisterInfo*> tempreg_info_;
  GrowableArray<RegisterInfo*> reginfo_map_;
  GrowableArray<void*> pointer_storage_;
  CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
  CodeOffset data_offset_;            // Starting offset of literal pool.
  size_t total_size_;                 // Header + code size.
  LIR* block_label_list_;
  PromotionMap* promotion_map_;
  /*
   * TODO: The code generation utilities don't have a built-in
   * mechanism to propagate the original Dalvik opcode address to the
   * associated generated instructions. For the trace compiler, this wasn't
   * necessary because the interpreter handled all throws and debugging
   * requests. For now we'll handle this by placing the Dalvik offset
   * in the CompilationUnit struct before codegen for each instruction.
   * The low-level LIR creation utilities will pull it from here. Rework this.
   */
  DexOffset current_dalvik_offset_;
  size_t estimated_native_code_size_;  // Just an estimate; used to reserve code_buffer_ size.
  RegisterPool* reg_pool_;
  /*
   * Sanity checking for the register temp tracking. The same SSA
   * name should never be associated with more than one temp register
   * within the compilation of a single instruction.
   */
  int live_sreg_;
  CodeBuffer code_buffer_;
  // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
  std::vector<uint8_t> encoded_mapping_table_;
  std::vector<uint32_t> core_vmap_table_;
  std::vector<uint32_t> fp_vmap_table_;
  std::vector<uint8_t> native_gc_map_;
  int num_core_spills_;
  int num_fp_spills_;
  int frame_size_;
  unsigned int core_spill_mask_;
  unsigned int fp_spill_mask_;
  LIR* first_lir_insn_;
  LIR* last_lir_insn_;

  GrowableArray<LIRSlowPath*> slow_paths_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_