mir_to_lir.h revision 8dea81ca9c0201ceaa88086b927a5838a06a3e69
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_ 18#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_ 19 20#include "invoke_type.h" 21#include "compiled_method.h" 22#include "dex/compiler_enums.h" 23#include "dex/compiler_ir.h" 24#include "dex/reg_storage.h" 25#include "dex/backend.h" 26#include "dex/quick/resource_mask.h" 27#include "driver/compiler_driver.h" 28#include "leb128.h" 29#include "safe_map.h" 30#include "utils/array_ref.h" 31#include "utils/arena_allocator.h" 32#include "utils/growable_array.h" 33 34namespace art { 35 36/* 37 * TODO: refactoring pass to move these (and other) typdefs towards usage style of runtime to 38 * add type safety (see runtime/offsets.h). 39 */ 40typedef uint32_t DexOffset; // Dex offset in code units. 41typedef uint16_t NarrowDexOffset; // For use in structs, Dex offsets range from 0 .. 0xffff. 42typedef uint32_t CodeOffset; // Native code offset in bytes. 43 44// Set to 1 to measure cost of suspend check. 45#define NO_SUSPEND 0 46 47#define IS_BINARY_OP (1ULL << kIsBinaryOp) 48#define IS_BRANCH (1ULL << kIsBranch) 49#define IS_IT (1ULL << kIsIT) 50#define IS_LOAD (1ULL << kMemLoad) 51#define IS_QUAD_OP (1ULL << kIsQuadOp) 52#define IS_QUIN_OP (1ULL << kIsQuinOp) 53#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp) 54#define IS_STORE (1ULL << kMemStore) 55#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp) 56#define IS_UNARY_OP (1ULL << kIsUnaryOp) 57#define NEEDS_FIXUP (1ULL << kPCRelFixup) 58#define NO_OPERAND (1ULL << kNoOperand) 59#define REG_DEF0 (1ULL << kRegDef0) 60#define REG_DEF1 (1ULL << kRegDef1) 61#define REG_DEF2 (1ULL << kRegDef2) 62#define REG_DEFA (1ULL << kRegDefA) 63#define REG_DEFD (1ULL << kRegDefD) 64#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0) 65#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2) 66#define REG_DEF_LIST0 (1ULL << kRegDefList0) 67#define REG_DEF_LIST1 (1ULL << kRegDefList1) 68#define REG_DEF_LR (1ULL << kRegDefLR) 69#define REG_DEF_SP (1ULL << kRegDefSP) 70#define REG_USE0 (1ULL << kRegUse0) 71#define REG_USE1 (1ULL << kRegUse1) 72#define REG_USE2 (1ULL << kRegUse2) 73#define REG_USE3 (1ULL << kRegUse3) 74#define REG_USE4 (1ULL << kRegUse4) 75#define REG_USEA (1ULL << kRegUseA) 76#define REG_USEC (1ULL << kRegUseC) 77#define REG_USED (1ULL << kRegUseD) 78#define REG_USEB (1ULL << kRegUseB) 79#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0) 80#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2) 81#define REG_USE_LIST0 (1ULL << kRegUseList0) 82#define REG_USE_LIST1 (1ULL << kRegUseList1) 83#define REG_USE_LR (1ULL << kRegUseLR) 84#define REG_USE_PC (1ULL << kRegUsePC) 85#define REG_USE_SP (1ULL << kRegUseSP) 86#define SETS_CCODES (1ULL << kSetsCCodes) 87#define USES_CCODES (1ULL << kUsesCCodes) 88#define USE_FP_STACK (1ULL << kUseFpStack) 89#define REG_USE_LO (1ULL << kUseLo) 90#define REG_USE_HI (1ULL << kUseHi) 91#define REG_DEF_LO (1ULL << kDefLo) 92#define REG_DEF_HI (1ULL << kDefHi) 93 
94// Common combo register usage patterns. 95#define REG_DEF01 (REG_DEF0 | REG_DEF1) 96#define REG_DEF012 (REG_DEF0 | REG_DEF1 | REG_DEF2) 97#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2) 98#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01) 99#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0) 100#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12) 101#define REG_DEF0_USE123 (REG_DEF0 | REG_USE123) 102#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1) 103#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2) 104#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED) 105#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD) 106#define REG_DEFA_USEA (REG_DEFA | REG_USEA) 107#define REG_USE012 (REG_USE01 | REG_USE2) 108#define REG_USE014 (REG_USE01 | REG_USE4) 109#define REG_USE01 (REG_USE0 | REG_USE1) 110#define REG_USE02 (REG_USE0 | REG_USE2) 111#define REG_USE12 (REG_USE1 | REG_USE2) 112#define REG_USE23 (REG_USE2 | REG_USE3) 113#define REG_USE123 (REG_USE1 | REG_USE2 | REG_USE3) 114 115// TODO: #includes need a cleanup 116#ifndef INVALID_SREG 117#define INVALID_SREG (-1) 118#endif 119 120struct BasicBlock; 121struct CallInfo; 122struct CompilationUnit; 123struct InlineMethod; 124struct MIR; 125struct LIR; 126struct RegLocation; 127struct RegisterInfo; 128class DexFileMethodInliner; 129class MIRGraph; 130class Mir2Lir; 131 132typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, 133 const MethodReference& target_method, 134 uint32_t method_idx, uintptr_t direct_code, 135 uintptr_t direct_method, InvokeType type); 136 137typedef std::vector<uint8_t> CodeBuffer; 138 139struct UseDefMasks { 140 const ResourceMask* use_mask; // Resource mask for use. 141 const ResourceMask* def_mask; // Resource mask for def. 142}; 143 144struct AssemblyInfo { 145 LIR* pcrel_next; // Chain of LIR nodes needing pc relative fixups. 146}; 147 148struct LIR { 149 CodeOffset offset; // Offset of this instruction. 150 NarrowDexOffset dalvik_offset; // Offset of Dalvik opcode in code units (16-bit words). 151 int16_t opcode; 152 LIR* next; 153 LIR* prev; 154 LIR* target; 155 struct { 156 unsigned int alias_info:17; // For Dalvik register disambiguation. 157 bool is_nop:1; // LIR is optimized away. 158 unsigned int size:4; // Note: size of encoded instruction is in bytes. 159 bool use_def_invalid:1; // If true, masks should not be used. 160 unsigned int generation:1; // Used to track visitation state during fixup pass. 161 unsigned int fixup:8; // Fixup kind. 162 } flags; 163 union { 164 UseDefMasks m; // Use & Def masks used during optimization. 165 AssemblyInfo a; // Instruction info used during assembly phase. 166 } u; 167 int32_t operands[5]; // [0..4] = [dest, src1, src2, extra, extra2]. 168}; 169 170// Target-specific initialization. 171Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 172 ArenaAllocator* const arena); 173Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 174 ArenaAllocator* const arena); 175Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 176 ArenaAllocator* const arena); 177Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 178 ArenaAllocator* const arena); 179Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, 180 ArenaAllocator* const arena); 181 182// Utility macros to traverse the LIR list. 183#define NEXT_LIR(lir) (lir->next) 184#define PREV_LIR(lir) (lir->prev) 185 186// Defines for alias_info (tracks Dalvik register references). 
187#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff) 188#define DECODE_ALIAS_INFO_WIDE_FLAG (0x10000) 189#define DECODE_ALIAS_INFO_WIDE(X) ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0) 190#define ENCODE_ALIAS_INFO(REG, ISWIDE) (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0)) 191 192#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8)) 193#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \ 194 do { \ 195 low_reg = both_regs & 0xff; \ 196 high_reg = (both_regs >> 8) & 0xff; \ 197 } while (false) 198 199// Mask to denote sreg as the start of a double. Must not interfere with low 16 bits. 200#define STARTING_DOUBLE_SREG 0x10000 201 202// TODO: replace these macros 203#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath)) 204#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath)) 205#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath)) 206#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath)) 207#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath)) 208 209class Mir2Lir : public Backend { 210 public: 211 /* 212 * Auxiliary information describing the location of data embedded in the Dalvik 213 * byte code stream. 214 */ 215 struct EmbeddedData { 216 CodeOffset offset; // Code offset of data block. 217 const uint16_t* table; // Original dex data. 218 DexOffset vaddr; // Dalvik offset of parent opcode. 219 }; 220 221 struct FillArrayData : EmbeddedData { 222 int32_t size; 223 }; 224 225 struct SwitchTable : EmbeddedData { 226 LIR* anchor; // Reference instruction for relative offsets. 227 LIR** targets; // Array of case targets. 228 }; 229 230 /* Static register use counts */ 231 struct RefCounts { 232 int count; 233 int s_reg; 234 }; 235 236 /* 237 * Data structure tracking the mapping detween a Dalvik value (32 or 64 bits) 238 * and native register storage. The primary purpose is to reuse previuosly 239 * loaded values, if possible, and otherwise to keep the value in register 240 * storage as long as possible. 241 * 242 * NOTE 1: wide_value refers to the width of the Dalvik value contained in 243 * this register (or pair). For example, a 64-bit register containing a 32-bit 244 * Dalvik value would have wide_value==false even though the storage container itself 245 * is wide. Similarly, a 32-bit register containing half of a 64-bit Dalvik value 246 * would have wide_value==true (and additionally would have its partner field set to the 247 * other half whose wide_value field would also be true. 248 * 249 * NOTE 2: In the case of a register pair, you can determine which of the partners 250 * is the low half by looking at the s_reg names. The high s_reg will equal low_sreg + 1. 251 * 252 * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value 253 * will be true and partner==self. s_reg refers to the low-order word of the Dalvik 254 * value, and the s_reg of the high word is implied (s_reg + 1). 255 * 256 * NOTE 4: The reg and is_temp fields should always be correct. If is_temp is false no 257 * other fields have meaning. [perhaps not true, wide should work for promoted regs?] 258 * If is_temp==true and live==false, no other fields have 259 * meaning. If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start 260 * and def_end describe the relationship between the temp register/register pair and 261 * the Dalvik value[s] described by s_reg/s_reg+1. 
262 * 263 * The fields used_storage, master_storage and storage_mask are used to track allocation 264 * in light of potential aliasing. For example, consider Arm's d2, which overlaps s4 & s5. 265 * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of 266 * storage use. For s4, it would be 0x0000001; for s5 0x00000002. These values should not 267 * change once initialized. The "used_storage" field tracks current allocation status. 268 * Although each record contains this field, only the field from the largest member of 269 * an aliased group is used. In our case, it would be d2's. The master_storage pointer 270 * of d2, s4 and s5 would all point to d2's used_storage field. Each bit in a used_storage 271 * represents 32 bits of storage. d2's used_storage would be initialized to 0xfffffffc. 272 * Then, if we wanted to determine whether s4 could be allocated, we would "and" 273 * s4's storage_mask with s4's *master_storage. If the result is zero, s4 is free and 274 * to allocate: *master_storage |= storage_mask. To free, *master_storage &= ~storage_mask. 275 * 276 * For an X86 vector register example, storage_mask would be: 277 * 0x00000001 for 32-bit view of xmm1 278 * 0x00000003 for 64-bit view of xmm1 279 * 0x0000000f for 128-bit view of xmm1 280 * 0x000000ff for 256-bit view of ymm1 // future expansion, if needed 281 * 0x0000ffff for 512-bit view of ymm1 // future expansion, if needed 282 * 0xffffffff for 1024-bit view of ymm1 // future expansion, if needed 283 * 284 * The "liveness" of a register is handled in a similar way. The liveness_ storage is 285 * held in the widest member of an aliased set. Note, though, that for a temp register to 286 * reused as live, it must both be marked live and the associated SReg() must match the 287 * desired s_reg. This gets a little complicated when dealing with aliased registers. All 288 * members of an aliased set will share the same liveness flags, but each will individually 289 * maintain s_reg_. In this way we can know that at least one member of an 290 * aliased set is live, but will only fully match on the appropriate alias view. For example, 291 * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9 292 * because it is wide), its aliases s2 and s3 will show as live, but will have 293 * s_reg_ == INVALID_SREG. An attempt to later AllocLiveReg() of v9 with a single-precision 294 * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9. 295 * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will 296 * report that v9 is currently not live as a single (which is what we want). 297 * 298 * NOTE: the x86 usage is still somewhat in flux. There are competing notions of how 299 * to treat xmm registers: 300 * 1. Treat them all as 128-bits wide, but denote how much data used via bytes field. 301 * o This more closely matches reality, but means you'd need to be able to get 302 * to the associated RegisterInfo struct to figure out how it's being used. 303 * o This is how 64-bit core registers will be used - always 64 bits, but the 304 * "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage. 305 * 2. View the xmm registers based on contents. 306 * o A single in a xmm2 register would be k32BitVector, while a double in xmm2 would 307 * be a k64BitVector. 308 * o Note that the two uses above would be considered distinct registers (but with 309 * the aliasing mechanism, we could detect interference). 
310 * o This is how aliased double and single float registers will be handled on 311 * Arm and MIPS. 312 * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and 313 * mechanism 2 for aliased float registers and x86 vector registers. 314 */ 315 class RegisterInfo { 316 public: 317 RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll); 318 ~RegisterInfo() {} 319 static void* operator new(size_t size, ArenaAllocator* arena) { 320 return arena->Alloc(size, kArenaAllocRegAlloc); 321 } 322 323 static const uint32_t k32SoloStorageMask = 0x00000001; 324 static const uint32_t kLowSingleStorageMask = 0x00000001; 325 static const uint32_t kHighSingleStorageMask = 0x00000002; 326 static const uint32_t k64SoloStorageMask = 0x00000003; 327 static const uint32_t k128SoloStorageMask = 0x0000000f; 328 static const uint32_t k256SoloStorageMask = 0x000000ff; 329 static const uint32_t k512SoloStorageMask = 0x0000ffff; 330 static const uint32_t k1024SoloStorageMask = 0xffffffff; 331 332 bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; } 333 void MarkInUse() { master_->used_storage_ |= storage_mask_; } 334 void MarkFree() { master_->used_storage_ &= ~storage_mask_; } 335 // No part of the containing storage is live in this view. 336 bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; } 337 // Liveness of this view matches. Note: not equivalent to !IsDead(). 338 bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; } 339 void MarkLive(int s_reg) { 340 // TODO: Anything useful to assert here? 341 s_reg_ = s_reg; 342 master_->liveness_ |= storage_mask_; 343 } 344 void MarkDead() { 345 if (SReg() != INVALID_SREG) { 346 s_reg_ = INVALID_SREG; 347 master_->liveness_ &= ~storage_mask_; 348 ResetDefBody(); 349 } 350 } 351 RegStorage GetReg() { return reg_; } 352 void SetReg(RegStorage reg) { reg_ = reg; } 353 bool IsTemp() { return is_temp_; } 354 void SetIsTemp(bool val) { is_temp_ = val; } 355 bool IsWide() { return wide_value_; } 356 void SetIsWide(bool val) { 357 wide_value_ = val; 358 if (!val) { 359 // If not wide, reset partner to self. 360 SetPartner(GetReg()); 361 } 362 } 363 bool IsDirty() { return dirty_; } 364 void SetIsDirty(bool val) { dirty_ = val; } 365 RegStorage Partner() { return partner_; } 366 void SetPartner(RegStorage partner) { partner_ = partner; } 367 int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; } 368 const ResourceMask& DefUseMask() { return def_use_mask_; } 369 void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; } 370 RegisterInfo* Master() { return master_; } 371 void SetMaster(RegisterInfo* master) { 372 master_ = master; 373 if (master != this) { 374 master_->aliased_ = true; 375 DCHECK(alias_chain_ == nullptr); 376 alias_chain_ = master_->alias_chain_; 377 master_->alias_chain_ = this; 378 } 379 } 380 bool IsAliased() { return aliased_; } 381 RegisterInfo* GetAliasChain() { return alias_chain_; } 382 uint32_t StorageMask() { return storage_mask_; } 383 void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; } 384 LIR* DefStart() { return def_start_; } 385 void SetDefStart(LIR* def_start) { def_start_ = def_start; } 386 LIR* DefEnd() { return def_end_; } 387 void SetDefEnd(LIR* def_end) { def_end_ = def_end; } 388 void ResetDefBody() { def_start_ = def_end_ = nullptr; } 389 // Find member of aliased set matching storage_used; return nullptr if none. 
390 RegisterInfo* FindMatchingView(uint32_t storage_used) { 391 RegisterInfo* res = Master(); 392 for (; res != nullptr; res = res->GetAliasChain()) { 393 if (res->StorageMask() == storage_used) 394 break; 395 } 396 return res; 397 } 398 399 private: 400 RegStorage reg_; 401 bool is_temp_; // Can allocate as temp? 402 bool wide_value_; // Holds a Dalvik wide value (either itself, or part of a pair). 403 bool dirty_; // If live, is it dirty? 404 bool aliased_; // Is this the master for other aliased RegisterInfo's? 405 RegStorage partner_; // If wide_value, other reg of pair or self if 64-bit register. 406 int s_reg_; // Name of live value. 407 ResourceMask def_use_mask_; // Resources for this element. 408 uint32_t used_storage_; // 1 bit per 4 bytes of storage. Unused by aliases. 409 uint32_t liveness_; // 1 bit per 4 bytes of storage. Unused by aliases. 410 RegisterInfo* master_; // Pointer to controlling storage mask. 411 uint32_t storage_mask_; // Track allocation of sub-units. 412 LIR *def_start_; // Starting inst in last def sequence. 413 LIR *def_end_; // Ending inst in last def sequence. 414 RegisterInfo* alias_chain_; // Chain of aliased registers. 415 }; 416 417 class RegisterPool { 418 public: 419 RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena, 420 const ArrayRef<const RegStorage>& core_regs, 421 const ArrayRef<const RegStorage>& core64_regs, 422 const ArrayRef<const RegStorage>& sp_regs, 423 const ArrayRef<const RegStorage>& dp_regs, 424 const ArrayRef<const RegStorage>& reserved_regs, 425 const ArrayRef<const RegStorage>& reserved64_regs, 426 const ArrayRef<const RegStorage>& core_temps, 427 const ArrayRef<const RegStorage>& core64_temps, 428 const ArrayRef<const RegStorage>& sp_temps, 429 const ArrayRef<const RegStorage>& dp_temps); 430 ~RegisterPool() {} 431 static void* operator new(size_t size, ArenaAllocator* arena) { 432 return arena->Alloc(size, kArenaAllocRegAlloc); 433 } 434 void ResetNextTemp() { 435 next_core_reg_ = 0; 436 next_sp_reg_ = 0; 437 next_dp_reg_ = 0; 438 } 439 GrowableArray<RegisterInfo*> core_regs_; 440 int next_core_reg_; 441 GrowableArray<RegisterInfo*> core64_regs_; 442 int next_core64_reg_; 443 GrowableArray<RegisterInfo*> sp_regs_; // Single precision float. 444 int next_sp_reg_; 445 GrowableArray<RegisterInfo*> dp_regs_; // Double precision float. 446 int next_dp_reg_; 447 GrowableArray<RegisterInfo*>* ref_regs_; // Points to core_regs_ or core64_regs_ 448 int* next_ref_reg_; 449 450 private: 451 Mir2Lir* const m2l_; 452 }; 453 454 struct PromotionMap { 455 RegLocationType core_location:3; 456 uint8_t core_reg; 457 RegLocationType fp_location:3; 458 uint8_t FpReg; 459 bool first_in_pair; 460 }; 461 462 // 463 // Slow paths. This object is used generate a sequence of code that is executed in the 464 // slow path. For example, resolving a string or class is slow as it will only be executed 465 // once (after that it is resolved and doesn't need to be done again). We want slow paths 466 // to be placed out-of-line, and not require a (mispredicted, probably) conditional forward 467 // branch over them. 468 // 469 // If you want to create a slow path, declare a class derived from LIRSlowPath and provide 470 // the Compile() function that will be called near the end of the code generated by the 471 // method. 472 // 473 // The basic flow for a slow path is: 474 // 475 // CMP reg, #value 476 // BEQ fromfast 477 // cont: 478 // ... 479 // fast path code 480 // ... 481 // more code 482 // ... 483 // RETURN 484 /// 485 // fromfast: 486 // ... 
487 // slow path code 488 // ... 489 // B cont 490 // 491 // So you see we need two labels and two branches. The first branch (called fromfast) is 492 // the conditional branch to the slow path code. The second label (called cont) is used 493 // as an unconditional branch target for getting back to the code after the slow path 494 // has completed. 495 // 496 497 class LIRSlowPath { 498 public: 499 LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast, 500 LIR* cont = nullptr) : 501 m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) { 502 m2l->StartSlowPath(cont); 503 } 504 virtual ~LIRSlowPath() {} 505 virtual void Compile() = 0; 506 507 static void* operator new(size_t size, ArenaAllocator* arena) { 508 return arena->Alloc(size, kArenaAllocData); 509 } 510 511 LIR *GetContinuationLabel() { 512 return cont_; 513 } 514 515 LIR *GetFromFast() { 516 return fromfast_; 517 } 518 519 protected: 520 LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel); 521 522 Mir2Lir* const m2l_; 523 CompilationUnit* const cu_; 524 const DexOffset current_dex_pc_; 525 LIR* const fromfast_; 526 LIR* const cont_; 527 }; 528 529 // Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_. 530 class ScopedMemRefType { 531 public: 532 ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type) 533 : m2l_(m2l), 534 old_mem_ref_type_(m2l->mem_ref_type_) { 535 m2l_->mem_ref_type_ = new_mem_ref_type; 536 } 537 538 ~ScopedMemRefType() { 539 m2l_->mem_ref_type_ = old_mem_ref_type_; 540 } 541 542 private: 543 Mir2Lir* const m2l_; 544 ResourceMask::ResourceBit old_mem_ref_type_; 545 546 DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType); 547 }; 548 549 virtual ~Mir2Lir() {} 550 551 int32_t s4FromSwitchData(const void* switch_data) { 552 return *reinterpret_cast<const int32_t*>(switch_data); 553 } 554 555 /* 556 * TODO: this is a trace JIT vestige, and its use should be reconsidered. At the time 557 * it was introduced, it was intended to be a quick best guess of type without having to 558 * take the time to do type analysis. Currently, though, we have a much better idea of 559 * the types of Dalvik virtual registers. Instead of using this for a best guess, why not 560 * just use our knowledge of type to select the most appropriate register class? 561 */ 562 RegisterClass RegClassBySize(OpSize size) { 563 if (size == kReference) { 564 return kRefReg; 565 } else { 566 return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || 567 size == kSignedByte) ? kCoreReg : kAnyReg; 568 } 569 } 570 571 size_t CodeBufferSizeInBytes() { 572 return code_buffer_.size() / sizeof(code_buffer_[0]); 573 } 574 575 static bool IsPseudoLirOp(int opcode) { 576 return (opcode < 0); 577 } 578 579 /* 580 * LIR operands are 32-bit integers. Sometimes, (especially for managing 581 * instructions which require PC-relative fixups), we need the operands to carry 582 * pointers. To do this, we assign these pointers an index in pointer_storage_, and 583 * hold that index in the operand array. 584 * TUNING: If use of these utilities becomes more common on 32-bit builds, it 585 * may be worth conditionally-compiling a set of identity functions here. 586 */ 587 uint32_t WrapPointer(void* pointer) { 588 uint32_t res = pointer_storage_.Size(); 589 pointer_storage_.Insert(pointer); 590 return res; 591 } 592 593 void* UnwrapPointer(size_t index) { 594 return pointer_storage_.Get(index); 595 } 596 597 // strdup(), but allocates from the arena. 
598 char* ArenaStrdup(const char* str) { 599 size_t len = strlen(str) + 1; 600 char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc)); 601 if (res != NULL) { 602 strncpy(res, str, len); 603 } 604 return res; 605 } 606 607 // Shared by all targets - implemented in codegen_util.cc 608 void AppendLIR(LIR* lir); 609 void InsertLIRBefore(LIR* current_lir, LIR* new_lir); 610 void InsertLIRAfter(LIR* current_lir, LIR* new_lir); 611 612 /** 613 * @brief Provides the maximum number of compiler temporaries that the backend can/wants 614 * to place in a frame. 615 * @return Returns the maximum number of compiler temporaries. 616 */ 617 size_t GetMaxPossibleCompilerTemps() const; 618 619 /** 620 * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries. 621 * @return Returns the size in bytes for space needed for compiler temporary spill region. 622 */ 623 size_t GetNumBytesForCompilerTempSpillRegion(); 624 625 DexOffset GetCurrentDexPc() const { 626 return current_dalvik_offset_; 627 } 628 629 RegisterClass ShortyToRegClass(char shorty_type); 630 RegisterClass LocToRegClass(RegLocation loc); 631 int ComputeFrameSize(); 632 virtual void Materialize(); 633 virtual CompiledMethod* GetCompiledMethod(); 634 void MarkSafepointPC(LIR* inst); 635 void SetupResourceMasks(LIR* lir); 636 void SetMemRefType(LIR* lir, bool is_load, int mem_type); 637 void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit); 638 void SetupRegMask(ResourceMask* mask, int reg); 639 void DumpLIRInsn(LIR* arg, unsigned char* base_addr); 640 void DumpPromotionMap(); 641 void CodegenDump(); 642 LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0, 643 int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL); 644 LIR* NewLIR0(int opcode); 645 LIR* NewLIR1(int opcode, int dest); 646 LIR* NewLIR2(int opcode, int dest, int src1); 647 LIR* NewLIR2NoDest(int opcode, int src, int info); 648 LIR* NewLIR3(int opcode, int dest, int src1, int src2); 649 LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info); 650 LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2); 651 LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta); 652 LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi); 653 LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method); 654 LIR* AddWordData(LIR* *constant_list_p, int value); 655 LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi); 656 void ProcessSwitchTables(); 657 void DumpSparseSwitchTable(const uint16_t* table); 658 void DumpPackedSwitchTable(const uint16_t* table); 659 void MarkBoundary(DexOffset offset, const char* inst_str); 660 void NopLIR(LIR* lir); 661 void UnlinkLIR(LIR* lir); 662 bool EvaluateBranch(Instruction::Code opcode, int src1, int src2); 663 bool IsInexpensiveConstant(RegLocation rl_src); 664 ConditionCode FlipComparisonOrder(ConditionCode before); 665 ConditionCode NegateComparison(ConditionCode before); 666 virtual void InstallLiteralPools(); 667 void InstallSwitchTables(); 668 void InstallFillArrayData(); 669 bool VerifyCatchEntries(); 670 void CreateMappingTables(); 671 void CreateNativeGcMap(); 672 int AssignLiteralOffset(CodeOffset offset); 673 int AssignSwitchTablesOffset(CodeOffset offset); 674 int AssignFillArrayDataOffset(CodeOffset offset); 675 LIR* InsertCaseLabel(DexOffset vaddr, int keyVal); 676 void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec); 677 void MarkSparseCaseLabels(Mir2Lir::SwitchTable* 
tab_rec); 678 679 virtual void StartSlowPath(LIR *label) {} 680 virtual void BeginInvoke(CallInfo* info) {} 681 virtual void EndInvoke(CallInfo* info) {} 682 683 684 // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated. 685 RegLocation NarrowRegLoc(RegLocation loc); 686 687 // Shared by all targets - implemented in local_optimizations.cc 688 void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src); 689 void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir); 690 void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir); 691 virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir); 692 693 // Shared by all targets - implemented in ralloc_util.cc 694 int GetSRegHi(int lowSreg); 695 bool LiveOut(int s_reg); 696 void SimpleRegAlloc(); 697 void ResetRegPool(); 698 void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num); 699 void DumpRegPool(GrowableArray<RegisterInfo*>* regs); 700 void DumpCoreRegPool(); 701 void DumpFpRegPool(); 702 void DumpRegPools(); 703 /* Mark a temp register as dead. Does not affect allocation state. */ 704 void Clobber(RegStorage reg); 705 void ClobberSReg(int s_reg); 706 void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask); 707 int SRegToPMap(int s_reg); 708 void RecordCorePromotion(RegStorage reg, int s_reg); 709 RegStorage AllocPreservedCoreReg(int s_reg); 710 void RecordSinglePromotion(RegStorage reg, int s_reg); 711 void RecordDoublePromotion(RegStorage reg, int s_reg); 712 RegStorage AllocPreservedSingle(int s_reg); 713 virtual RegStorage AllocPreservedDouble(int s_reg); 714 RegStorage AllocTempBody(GrowableArray<RegisterInfo*> ®s, int* next_temp, bool required); 715 virtual RegStorage AllocFreeTemp(); 716 virtual RegStorage AllocTemp(); 717 virtual RegStorage AllocTempWide(); 718 virtual RegStorage AllocTempRef(); 719 virtual RegStorage AllocTempSingle(); 720 virtual RegStorage AllocTempDouble(); 721 virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class); 722 virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class); 723 void FlushReg(RegStorage reg); 724 void FlushRegWide(RegStorage reg); 725 RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide); 726 RegStorage FindLiveReg(GrowableArray<RegisterInfo*> ®s, int s_reg); 727 virtual void FreeTemp(RegStorage reg); 728 virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free); 729 virtual bool IsLive(RegStorage reg); 730 virtual bool IsTemp(RegStorage reg); 731 bool IsPromoted(RegStorage reg); 732 bool IsDirty(RegStorage reg); 733 void LockTemp(RegStorage reg); 734 void ResetDef(RegStorage reg); 735 void NullifyRange(RegStorage reg, int s_reg); 736 void MarkDef(RegLocation rl, LIR *start, LIR *finish); 737 void MarkDefWide(RegLocation rl, LIR *start, LIR *finish); 738 void ResetDefLoc(RegLocation rl); 739 void ResetDefLocWide(RegLocation rl); 740 void ResetDefTracking(); 741 void ClobberAllTemps(); 742 void FlushSpecificReg(RegisterInfo* info); 743 void FlushAllRegs(); 744 bool RegClassMatches(int reg_class, RegStorage reg); 745 void MarkLive(RegLocation loc); 746 void MarkTemp(RegStorage reg); 747 void UnmarkTemp(RegStorage reg); 748 void MarkWide(RegStorage reg); 749 void MarkNarrow(RegStorage reg); 750 void MarkClean(RegLocation loc); 751 void MarkDirty(RegLocation loc); 752 void MarkInUse(RegStorage reg); 753 bool CheckCorePoolSanity(); 754 virtual RegLocation UpdateLoc(RegLocation loc); 755 virtual RegLocation UpdateLocWide(RegLocation loc); 756 RegLocation UpdateRawLoc(RegLocation 
loc); 757 758 /** 759 * @brief Used to prepare a register location to receive a wide value. 760 * @see EvalLoc 761 * @param loc the location where the value will be stored. 762 * @param reg_class Type of register needed. 763 * @param update Whether the liveness information should be updated. 764 * @return Returns the properly typed temporary in physical register pairs. 765 */ 766 virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update); 767 768 /** 769 * @brief Used to prepare a register location to receive a value. 770 * @param loc the location where the value will be stored. 771 * @param reg_class Type of register needed. 772 * @param update Whether the liveness information should be updated. 773 * @return Returns the properly typed temporary in physical register. 774 */ 775 virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update); 776 777 void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs); 778 void DumpCounts(const RefCounts* arr, int size, const char* msg); 779 void DoPromotion(); 780 int VRegOffset(int v_reg); 781 int SRegOffset(int s_reg); 782 RegLocation GetReturnWide(RegisterClass reg_class); 783 RegLocation GetReturn(RegisterClass reg_class); 784 RegisterInfo* GetRegInfo(RegStorage reg); 785 786 // Shared by all targets - implemented in gen_common.cc. 787 void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr); 788 bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div, 789 RegLocation rl_src, RegLocation rl_dest, int lit); 790 bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit); 791 virtual void HandleSlowPaths(); 792 void GenBarrier(); 793 void GenDivZeroException(); 794 // c_code holds condition code that's generated from testing divisor against 0. 795 void GenDivZeroCheck(ConditionCode c_code); 796 // reg holds divisor. 
797 void GenDivZeroCheck(RegStorage reg); 798 void GenArrayBoundsCheck(RegStorage index, RegStorage length); 799 void GenArrayBoundsCheck(int32_t index, RegStorage length); 800 LIR* GenNullCheck(RegStorage reg); 801 void MarkPossibleNullPointerException(int opt_flags); 802 void MarkPossibleStackOverflowException(); 803 void ForceImplicitNullCheck(RegStorage reg, int opt_flags); 804 LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind); 805 LIR* GenNullCheck(RegStorage m_reg, int opt_flags); 806 LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags); 807 void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, 808 RegLocation rl_src2, LIR* taken, LIR* fall_through); 809 void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, 810 LIR* taken, LIR* fall_through); 811 virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src); 812 void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, 813 RegLocation rl_src); 814 void GenNewArray(uint32_t type_idx, RegLocation rl_dest, 815 RegLocation rl_src); 816 void GenFilledNewArray(CallInfo* info); 817 void GenSput(MIR* mir, RegLocation rl_src, 818 bool is_long_or_double, bool is_object); 819 void GenSget(MIR* mir, RegLocation rl_dest, 820 bool is_long_or_double, bool is_object); 821 void GenIGet(MIR* mir, int opt_flags, OpSize size, 822 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object); 823 void GenIPut(MIR* mir, int opt_flags, OpSize size, 824 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object); 825 void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index, 826 RegLocation rl_src); 827 828 void GenConstClass(uint32_t type_idx, RegLocation rl_dest); 829 void GenConstString(uint32_t string_idx, RegLocation rl_dest); 830 void GenNewInstance(uint32_t type_idx, RegLocation rl_dest); 831 void GenThrow(RegLocation rl_src); 832 void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src); 833 void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src); 834 void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest, 835 RegLocation rl_src1, RegLocation rl_src2); 836 virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, 837 RegLocation rl_src1, RegLocation rl_shift); 838 void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, 839 RegLocation rl_src, int lit); 840 void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, 841 RegLocation rl_src1, RegLocation rl_src2); 842 template <size_t pointer_size> 843 void GenConversionCall(ThreadOffset<pointer_size> func_offset, RegLocation rl_dest, 844 RegLocation rl_src); 845 virtual void GenSuspendTest(int opt_flags); 846 virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target); 847 848 // This will be overridden by x86 implementation. 849 virtual void GenConstWide(RegLocation rl_dest, int64_t value); 850 virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, 851 RegLocation rl_src1, RegLocation rl_src2); 852 853 // Shared by all targets - implemented in gen_invoke.cc. 
854 template <size_t pointer_size> 855 LIR* CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset, bool safepoint_pc, 856 bool use_link = true); 857 RegStorage CallHelperSetup(ThreadOffset<4> helper_offset); 858 RegStorage CallHelperSetup(ThreadOffset<8> helper_offset); 859 template <size_t pointer_size> 860 void CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc); 861 template <size_t pointer_size> 862 void CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc); 863 template <size_t pointer_size> 864 void CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc); 865 template <size_t pointer_size> 866 void CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset, RegLocation arg0, 867 bool safepoint_pc); 868 template <size_t pointer_size> 869 void CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1, 870 bool safepoint_pc); 871 template <size_t pointer_size> 872 void CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0, 873 RegLocation arg1, bool safepoint_pc); 874 template <size_t pointer_size> 875 void CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset, RegLocation arg0, 876 int arg1, bool safepoint_pc); 877 template <size_t pointer_size> 878 void CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0, RegStorage arg1, 879 bool safepoint_pc); 880 template <size_t pointer_size> 881 void CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, int arg1, 882 bool safepoint_pc); 883 template <size_t pointer_size> 884 void CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0, 885 bool safepoint_pc); 886 template <size_t pointer_size> 887 void CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, 888 bool safepoint_pc); 889 template <size_t pointer_size> 890 void CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset, 891 RegStorage arg0, RegLocation arg2, bool safepoint_pc); 892 template <size_t pointer_size> 893 void CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset, 894 RegLocation arg0, RegLocation arg1, 895 bool safepoint_pc); 896 template <size_t pointer_size> 897 void CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, 898 RegStorage arg1, bool safepoint_pc); 899 template <size_t pointer_size> 900 void CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, 901 RegStorage arg1, int arg2, bool safepoint_pc); 902 template <size_t pointer_size> 903 void CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0, 904 RegLocation arg2, bool safepoint_pc); 905 template <size_t pointer_size> 906 void CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg2, 907 bool safepoint_pc); 908 template <size_t pointer_size> 909 void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset, 910 int arg0, RegLocation arg1, RegLocation arg2, 911 bool safepoint_pc); 912 template <size_t pointer_size> 913 void CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset, 914 RegLocation arg0, RegLocation arg1, 915 RegLocation arg2, 916 bool safepoint_pc); 917 void GenInvoke(CallInfo* info); 918 void GenInvokeNoInline(CallInfo* info); 919 virtual void 
FlushIns(RegLocation* ArgLocs, RegLocation rl_method); 920 virtual int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel, 921 NextCallInsn next_call_insn, 922 const MethodReference& target_method, 923 uint32_t vtable_idx, 924 uintptr_t direct_code, uintptr_t direct_method, InvokeType type, 925 bool skip_this); 926 virtual int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel, 927 NextCallInsn next_call_insn, 928 const MethodReference& target_method, 929 uint32_t vtable_idx, 930 uintptr_t direct_code, uintptr_t direct_method, InvokeType type, 931 bool skip_this); 932 933 /** 934 * @brief Used to determine the register location of destination. 935 * @details This is needed during generation of inline intrinsics because it finds destination 936 * of return, 937 * either the physical register or the target of move-result. 938 * @param info Information about the invoke. 939 * @return Returns the destination location. 940 */ 941 RegLocation InlineTarget(CallInfo* info); 942 943 /** 944 * @brief Used to determine the wide register location of destination. 945 * @see InlineTarget 946 * @param info Information about the invoke. 947 * @return Returns the destination location. 948 */ 949 RegLocation InlineTargetWide(CallInfo* info); 950 951 bool GenInlinedCharAt(CallInfo* info); 952 bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty); 953 bool GenInlinedReverseBytes(CallInfo* info, OpSize size); 954 bool GenInlinedAbsInt(CallInfo* info); 955 bool GenInlinedAbsLong(CallInfo* info); 956 bool GenInlinedAbsFloat(CallInfo* info); 957 bool GenInlinedAbsDouble(CallInfo* info); 958 bool GenInlinedFloatCvt(CallInfo* info); 959 bool GenInlinedDoubleCvt(CallInfo* info); 960 virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based); 961 bool GenInlinedStringCompareTo(CallInfo* info); 962 bool GenInlinedCurrentThread(CallInfo* info); 963 bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile); 964 bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object, 965 bool is_volatile, bool is_ordered); 966 virtual int LoadArgRegs(CallInfo* info, int call_state, 967 NextCallInsn next_call_insn, 968 const MethodReference& target_method, 969 uint32_t vtable_idx, 970 uintptr_t direct_code, uintptr_t direct_method, InvokeType type, 971 bool skip_this); 972 973 // Shared by all targets - implemented in gen_loadstore.cc. 974 RegLocation LoadCurrMethod(); 975 void LoadCurrMethodDirect(RegStorage r_tgt); 976 virtual LIR* LoadConstant(RegStorage r_dest, int value); 977 // Natural word size. 978 virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) { 979 return LoadBaseDisp(r_base, displacement, r_dest, kWord); 980 } 981 // Load 32 bits, regardless of target. 982 virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) { 983 return LoadBaseDisp(r_base, displacement, r_dest, k32); 984 } 985 // Load a reference at base + displacement and decompress into register. 986 virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) { 987 return LoadBaseDisp(r_base, displacement, r_dest, kReference); 988 } 989 // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress. 990 virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind); 991 // Same as above, but derive the target register class from the location record. 992 virtual RegLocation LoadValue(RegLocation rl_src); 993 // Load Dalvik value with 64-bit memory storage. 
994 virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind); 995 // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress. 996 virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest); 997 // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress. 998 virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest); 999 // Load Dalvik value with 64-bit memory storage. 1000 virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest); 1001 // Load Dalvik value with 64-bit memory storage. 1002 virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest); 1003 // Store an item of natural word size. 1004 virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) { 1005 return StoreBaseDisp(r_base, displacement, r_src, kWord); 1006 } 1007 // Store an uncompressed reference into a compressed 32-bit container. 1008 virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src) { 1009 return StoreBaseDisp(r_base, displacement, r_src, kReference); 1010 } 1011 // Store 32 bits, regardless of target. 1012 virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) { 1013 return StoreBaseDisp(r_base, displacement, r_src, k32); 1014 } 1015 1016 /** 1017 * @brief Used to do the final store in the destination as per bytecode semantics. 1018 * @param rl_dest The destination dalvik register location. 1019 * @param rl_src The source register location. Can be either physical register or dalvik register. 1020 */ 1021 virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src); 1022 1023 /** 1024 * @brief Used to do the final store in a wide destination as per bytecode semantics. 1025 * @see StoreValue 1026 * @param rl_dest The destination dalvik register location. 1027 * @param rl_src The source register location. Can be either physical register or dalvik 1028 * register. 1029 */ 1030 virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src); 1031 1032 /** 1033 * @brief Used to do the final store to a destination as per bytecode semantics. 1034 * @see StoreValue 1035 * @param rl_dest The destination dalvik register location. 1036 * @param rl_src The source register location. It must be kLocPhysReg 1037 * 1038 * This is used for x86 two operand computations, where we have computed the correct 1039 * register value that now needs to be properly registered. This is used to avoid an 1040 * extra register copy that would result if StoreValue was called. 1041 */ 1042 virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src); 1043 1044 /** 1045 * @brief Used to do the final store in a wide destination as per bytecode semantics. 1046 * @see StoreValueWide 1047 * @param rl_dest The destination dalvik register location. 1048 * @param rl_src The source register location. It must be kLocPhysReg 1049 * 1050 * This is used for x86 two operand computations, where we have computed the correct 1051 * register values that now need to be properly registered. This is used to avoid an 1052 * extra pair of register copies that would result if StoreValueWide was called. 1053 */ 1054 virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src); 1055 1056 // Shared by all targets - implemented in mir_to_lir.cc. 
1057 void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list); 1058 virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir); 1059 bool MethodBlockCodeGen(BasicBlock* bb); 1060 bool SpecialMIR2LIR(const InlineMethod& special); 1061 virtual void MethodMIR2LIR(); 1062 // Update LIR for verbose listings. 1063 void UpdateLIROffsets(); 1064 1065 /* 1066 * @brief Load the address of the dex method into the register. 1067 * @param target_method The MethodReference of the method to be invoked. 1068 * @param type How the method will be invoked. 1069 * @param register that will contain the code address. 1070 * @note register will be passed to TargetReg to get physical register. 1071 */ 1072 void LoadCodeAddress(const MethodReference& target_method, InvokeType type, 1073 SpecialTargetRegister symbolic_reg); 1074 1075 /* 1076 * @brief Load the Method* of a dex method into the register. 1077 * @param target_method The MethodReference of the method to be invoked. 1078 * @param type How the method will be invoked. 1079 * @param register that will contain the code address. 1080 * @note register will be passed to TargetReg to get physical register. 1081 */ 1082 virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type, 1083 SpecialTargetRegister symbolic_reg); 1084 1085 /* 1086 * @brief Load the Class* of a Dex Class type into the register. 1087 * @param type How the method will be invoked. 1088 * @param register that will contain the code address. 1089 * @note register will be passed to TargetReg to get physical register. 1090 */ 1091 virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg); 1092 1093 // Routines that work for the generic case, but may be overriden by target. 1094 /* 1095 * @brief Compare memory to immediate, and branch if condition true. 1096 * @param cond The condition code that when true will branch to the target. 1097 * @param temp_reg A temporary register that can be used if compare to memory is not 1098 * supported by the architecture. 1099 * @param base_reg The register holding the base address. 1100 * @param offset The offset from the base. 1101 * @param check_value The immediate to compare to. 1102 * @returns The branch instruction that was generated. 1103 */ 1104 virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, 1105 int offset, int check_value, LIR* target); 1106 1107 // Required for target - codegen helpers. 
1108 virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, 1109 RegLocation rl_src, RegLocation rl_dest, int lit) = 0; 1110 virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0; 1111 virtual LIR* CheckSuspendUsingLoad() = 0; 1112 1113 virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0; 1114 virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0; 1115 1116 virtual LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest, 1117 OpSize size) = 0; 1118 virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, 1119 OpSize size) = 0; 1120 virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, 1121 int scale, OpSize size) = 0; 1122 virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, 1123 int displacement, RegStorage r_dest, OpSize size) = 0; 1124 virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0; 1125 virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0; 1126 virtual LIR* StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src, 1127 OpSize size) = 0; 1128 virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, 1129 OpSize size) = 0; 1130 virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, 1131 int scale, OpSize size) = 0; 1132 virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, 1133 int displacement, RegStorage r_src, OpSize size) = 0; 1134 virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0; 1135 1136 // Required for target - register utilities. 1137 virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0; 1138 virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0; 1139 virtual RegLocation GetReturnAlt() = 0; 1140 virtual RegLocation GetReturnWideAlt() = 0; 1141 virtual RegLocation LocCReturn() = 0; 1142 virtual RegLocation LocCReturnRef() = 0; 1143 virtual RegLocation LocCReturnDouble() = 0; 1144 virtual RegLocation LocCReturnFloat() = 0; 1145 virtual RegLocation LocCReturnWide() = 0; 1146 virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0; 1147 virtual void AdjustSpillMask() = 0; 1148 virtual void ClobberCallerSave() = 0; 1149 virtual void FreeCallTemps() = 0; 1150 virtual void LockCallTemps() = 0; 1151 virtual void MarkPreservedSingle(int v_reg, RegStorage reg) = 0; 1152 virtual void MarkPreservedDouble(int v_reg, RegStorage reg) = 0; 1153 virtual void CompilerInitializeRegAlloc() = 0; 1154 1155 // Required for target - miscellaneous. 1156 virtual void AssembleLIR() = 0; 1157 virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0; 1158 virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags, 1159 ResourceMask* use_mask, ResourceMask* def_mask) = 0; 1160 virtual const char* GetTargetInstFmt(int opcode) = 0; 1161 virtual const char* GetTargetInstName(int opcode) = 0; 1162 virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0; 1163 virtual ResourceMask GetPCUseDefEncoding() const = 0; 1164 virtual uint64_t GetTargetInstFlags(int opcode) = 0; 1165 virtual int GetInsnSize(LIR* lir) = 0; 1166 virtual bool IsUnconditionalBranch(LIR* lir) = 0; 1167 1168 // Check support for volatile load/store of a given size. 1169 virtual bool SupportsVolatileLoadStore(OpSize size) = 0; 1170 // Get the register class for load/store of a field. 
1171 virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0; 1172 1173 // Required for target - Dalvik-level generators. 1174 virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, 1175 RegLocation rl_src1, RegLocation rl_src2) = 0; 1176 virtual void GenMulLong(Instruction::Code, 1177 RegLocation rl_dest, RegLocation rl_src1, 1178 RegLocation rl_src2) = 0; 1179 virtual void GenAddLong(Instruction::Code, 1180 RegLocation rl_dest, RegLocation rl_src1, 1181 RegLocation rl_src2) = 0; 1182 virtual void GenAndLong(Instruction::Code, 1183 RegLocation rl_dest, RegLocation rl_src1, 1184 RegLocation rl_src2) = 0; 1185 virtual void GenArithOpDouble(Instruction::Code opcode, 1186 RegLocation rl_dest, RegLocation rl_src1, 1187 RegLocation rl_src2) = 0; 1188 virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, 1189 RegLocation rl_src1, RegLocation rl_src2) = 0; 1190 virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, 1191 RegLocation rl_src1, RegLocation rl_src2) = 0; 1192 virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, 1193 RegLocation rl_src) = 0; 1194 virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0; 1195 1196 /** 1197 * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max. 1198 * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm 1199 * that applies on integers. The generated code will write the smallest or largest value 1200 * directly into the destination register as specified by the invoke information. 1201 * @param info Information about the invoke. 1202 * @param is_min If true generates code that computes minimum. Otherwise computes maximum. 1203 * @return Returns true if successfully generated 1204 */ 1205 virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0; 1206 1207 virtual bool GenInlinedSqrt(CallInfo* info) = 0; 1208 virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0; 1209 virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0; 1210 virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0; 1211 virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0; 1212 virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1, 1213 RegLocation rl_src2) = 0; 1214 virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1, 1215 RegLocation rl_src2) = 0; 1216 virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1, 1217 RegLocation rl_src2) = 0; 1218 virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1, 1219 RegLocation rl_src2, bool is_div) = 0; 1220 virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, 1221 bool is_div) = 0; 1222 virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, 1223 bool is_div) = 0; 1224 /* 1225 * @brief Generate an integer div or rem operation by a literal. 1226 * @param rl_dest Destination Location. 1227 * @param rl_src1 Numerator Location. 1228 * @param rl_src2 Divisor Location. 1229 * @param is_div 'true' if this is a division, 'false' for a remainder. 1230 * @param check_zero 'true' if an exception should be generated if the divisor is 0. 
1231 */ 1232 virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, 1233 RegLocation rl_src2, bool is_div, bool check_zero) = 0; 1234 /* 1235 * @brief Generate an integer div or rem operation by a literal. 1236 * @param rl_dest Destination Location. 1237 * @param rl_src Numerator Location. 1238 * @param lit Divisor. 1239 * @param is_div 'true' if this is a division, 'false' for a remainder. 1240 */ 1241 virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, 1242 bool is_div) = 0; 1243 virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0; 1244 1245 /** 1246 * @brief Used for generating code that throws ArithmeticException if both registers are zero. 1247 * @details This is used for generating DivideByZero checks when divisor is held in two 1248 * separate registers. 1249 * @param reg The register holding the pair of 32-bit values. 1250 */ 1251 virtual void GenDivZeroCheckWide(RegStorage reg) = 0; 1252 1253 virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0; 1254 virtual void GenExitSequence() = 0; 1255 virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0; 1256 virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0; 1257 virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0; 1258 1259 /* 1260 * @brief Handle Machine Specific MIR Extended opcodes. 1261 * @param bb The basic block in which the MIR is from. 1262 * @param mir The MIR whose opcode is not standard extended MIR. 1263 * @note Base class implementation will abort for unknown opcodes. 1264 */ 1265 virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir); 1266 1267 /** 1268 * @brief Lowers the kMirOpSelect MIR into LIR. 1269 * @param bb The basic block in which the MIR is from. 1270 * @param mir The MIR whose opcode is kMirOpSelect. 1271 */ 1272 virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0; 1273 1274 /** 1275 * @brief Used to generate a memory barrier in an architecture specific way. 1276 * @details The last generated LIR will be considered for use as barrier. Namely, 1277 * if the last LIR can be updated in a way where it will serve the semantics of 1278 * barrier, then it will be used as such. Otherwise, a new LIR will be generated 1279 * that can keep the semantics. 1280 * @param barrier_kind The kind of memory barrier to generate. 1281 * @return whether a new instruction was generated. 
  /**
   * @brief Used to generate a memory barrier in an architecture-specific way.
   * @details The last generated LIR will be considered for use as the barrier. Namely,
   * if the last LIR can be updated in a way that lets it serve the semantics of the
   * barrier, then it will be used as such. Otherwise, a new LIR that provides those
   * semantics will be generated.
   * @param barrier_kind The kind of memory barrier to generate.
   * @return whether a new instruction was generated.
   */
  virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;

  virtual void GenMoveException(RegLocation rl_dest) = 0;
  virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                             int first_bit, int second_bit) = 0;
  virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
  virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
  virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
  virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
  virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
  virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_src, int scale,
                           bool card_mark) = 0;
  virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_shift) = 0;

  // Required for target - single operation generators.
  virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
  virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
  virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                              LIR* target) = 0;
  virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
  virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
  virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
  virtual void OpEndIT(LIR* it) = 0;
  virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
  virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
  virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
  virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
  virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
  virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

  /**
   * @brief Used to generate an LIR that loads from memory to a register.
   * @param r_dest The destination physical register.
   * @param r_base The base physical register for the memory operand.
   * @param offset The displacement for the memory operand.
   * @param move_type Specification of the move desired (size, alignment, register kind).
   * @return Returns the generated move LIR.
   */
  virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                           MoveType move_type) = 0;

  /**
   * @brief Used to generate an LIR that stores from a register to memory.
   * @param r_base The base physical register for the memory operand.
   * @param offset The displacement for the memory operand.
   * @param r_src The source physical register.
   * @param move_type Specification of the move desired (size, alignment, register kind).
   * @return Returns the generated move LIR.
   */
  virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                          MoveType move_type) = 0;
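  // Illustrative sketch (not part of the original header): an ARM-flavored GenMemBarrier()
  // might first try to fold the barrier into the most recently emitted instruction and only
  // emit a new DMB when that is impossible. kThumb2Dmb and kISH stand in for the target's
  // actual opcode and barrier-domain encodings and are placeholders here.
  //
  //   bool ArmStyleMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
  //     if (last_lir_insn_ != nullptr && last_lir_insn_->opcode == kThumb2Dmb) {
  //       return false;               // the previous LIR already provides barrier semantics
  //     }
  //     NewLIR1(kThumb2Dmb, kISH);    // otherwise emit a fresh barrier instruction
  //     return true;                  // a new instruction was generated
  //   }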
  /**
   * @brief Used for generating a conditional register-to-register operation.
   * @param op The opcode kind.
   * @param cc The condition code; the opcode is performed only when it holds.
   * @param r_dest The destination physical register.
   * @param r_src The source physical register.
   * @return Returns the newly created LIR, or null in case of creation failure.
   */
  virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;

  virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
  virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                           RegStorage r_src2) = 0;
  virtual LIR* OpTestSuspend(LIR* target) = 0;
  virtual LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) = 0;
  virtual LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) = 0;
  virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
  virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
  virtual void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                     int offset) = 0;
  virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
  virtual void OpTlsCmp(ThreadOffset<4> offset, int val) = 0;
  virtual void OpTlsCmp(ThreadOffset<8> offset, int val) = 0;
  virtual bool InexpensiveConstantInt(int32_t value) = 0;
  virtual bool InexpensiveConstantFloat(int32_t value) = 0;
  virtual bool InexpensiveConstantLong(int64_t value) = 0;
  virtual bool InexpensiveConstantDouble(int64_t value) = 0;

  // May be optimized by targets.
  virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
  virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

  // Temp workaround
  void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);

 protected:
  Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

  CompilationUnit* GetCompilationUnit() {
    return cu_;
  }
  /*
   * @brief Returns the index of the lowest set bit in 'x'.
   * @param x Value to be examined.
   * @returns The bit number of the lowest bit set in the value.
   */
  int32_t LowestSetBit(uint64_t x);
  /*
   * @brief Is this value a power of two?
   * @param x Value to be examined.
   * @returns 'true' if exactly one bit is set in the value.
   */
  bool IsPowerOfTwo(uint64_t x);
  /*
   * @brief Do these virtual register (VR) pairs overlap?
   * @param rl_op1 One RegLocation.
   * @param rl_op2 The other RegLocation.
   * @return 'true' if the VR pairs overlap.
   *
   * Checks whether a result pair has a misaligned overlap with an operand pair. This
   * is unusual for dx to generate, but it is legal (for now). In a future revision of
   * dex, we'll want to make this case illegal.
   */
  bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);

  /*
   * @brief Force a location (in a register) into a temporary register.
   * @param loc Location of the result.
   * @returns The updated location.
   */
  virtual RegLocation ForceTemp(RegLocation loc);

  /*
   * @brief Force a wide location (in registers) into temporary registers.
   * @param loc Location of the result.
   * @returns The updated location.
   */
  virtual RegLocation ForceTempWide(RegLocation loc);
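  // Illustrative sketch (not part of the original header): plausible portable
  // implementations of the two bit-twiddling helpers declared above. Note the classic
  // x & (x - 1) trick would also report 0 as a power of two, hence the guard.
  //
  //   int32_t Mir2Lir::LowestSetBit(uint64_t x) {
  //     int bit_posn = 0;
  //     while ((x & 1) == 0) {  // assumes x != 0; a zero input would never terminate
  //       bit_posn++;
  //       x >>= 1;
  //     }
  //     return bit_posn;
  //   }
  //
  //   bool Mir2Lir::IsPowerOfTwo(uint64_t x) {
  //     return x != 0 && (x & (x - 1)) == 0;  // exactly one bit set
  //   }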
  static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
    return wide ? k64 : ref ? kReference : k32;
  }

  virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                  RegLocation rl_dest, RegLocation rl_src);

  void AddSlowPath(LIRSlowPath* slowpath);

  virtual void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                          bool type_known_abstract, bool use_declaring_class,
                                          bool can_assume_type_is_in_dex_cache,
                                          uint32_t type_idx, RegLocation rl_dest,
                                          RegLocation rl_src);
  /*
   * @brief Generate the debug_frame FDE information if possible.
   * @returns Pointer to a vector containing the CFI information, or NULL.
   */
  virtual std::vector<uint8_t>* ReturnCallFrameInformation();

  /**
   * @brief Used to insert a marker that can be used to associate MIR with LIR.
   * @details Only inserts the marker if verbosity is enabled.
   * @param mir The MIR that is currently being generated.
   */
  void GenPrintLabel(MIR* mir);

  /**
   * @brief Used to generate the return sequence when there is no frame.
   * @details Assumes that the return registers have already been populated.
   */
  virtual void GenSpecialExitSequence() = 0;

  /**
   * @brief Used to generate code for special methods that are known to be
   * small enough to work in frameless mode.
   * @param bb The basic block of the first MIR.
   * @param mir The first MIR of the special method.
   * @param special Information about the special method.
   * @return Returns whether or not this was handled successfully. Returns false
   * if the caller should punt to normal Mir2Lir conversion.
   */
  virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

 protected:
  void ClobberBody(RegisterInfo* p);
  void SetCurrentDexPc(DexOffset dexpc) {
    current_dalvik_offset_ = dexpc;
  }

  /**
   * @brief Used to lock a register if the argument at in_position was passed that way.
   * @details Does nothing if the argument is passed via the stack.
   * @param in_position The argument number whose register to lock.
   * @param wide Whether the argument is wide.
   */
  void LockArg(int in_position, bool wide = false);

  /**
   * @brief Used to load a VR argument to a physical register.
   * @details The load is only done if the argument is not already in a physical register.
   * LockArg must have been previously called.
   * @param in_position The argument number to load.
   * @param reg_class The register class into which to load the argument.
   * @param wide Whether the argument is 64-bit or not.
   * @return Returns the register (or register pair) for the loaded argument.
   */
  RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);

  /**
   * @brief Used to load a VR argument directly to a specified register location.
   * @param in_position The argument number to place in a register.
   * @param rl_dest The register location where to place the argument.
   */
  void LoadArgDirect(int in_position, RegLocation rl_dest);

  /**
   * @brief Used to generate LIR for a special getter method.
   * @param mir The MIR that represents the iget.
   * @param special Information about the special getter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIGet(MIR* mir, const InlineMethod& special);
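  // Illustrative sketch (not part of the original header): the expected calling pattern for
  // the argument helpers above, e.g. inside a special getter on the receiver object. The
  // in_position value 0 and the kCoreReg register class are example choices, and the field
  // load itself is elided.
  //
  //   LockArg(0);                                  // pin "this" if it arrived in a register
  //   RegStorage reg_obj = LoadArg(0, kCoreReg);   // materialize it; no-op if already live
  //   // ... emit the field load from reg_obj into the return register ...
  //   GenSpecialExitSequence();                    // frameless return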
  /**
   * @brief Used to generate LIR for a special setter method.
   * @param mir The MIR that represents the iput.
   * @param special Information about the special setter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

  /**
   * @brief Used to generate LIR for a special return-args method.
   * @param mir The MIR that represents the return of an argument.
   * @param special Information about the special return-args method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

  void AddDivZeroCheckSlowPath(LIR* branch);

  // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
  // kArg2 as a temp.
  virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

  /**
   * @brief Load a constant into a RegLocation.
   * @param rl_dest Destination RegLocation.
   * @param value Constant value.
   */
  virtual void GenConst(RegLocation rl_dest, int value);

 public:
  // TODO: add accessors for these.
  LIR* literal_list_;         // Constants.
  LIR* method_literal_list_;  // Method literals requiring patching.
  LIR* class_literal_list_;   // Class literals requiring patching.
  LIR* code_literal_list_;    // Code literals requiring patching.
  LIR* first_fixup_;          // Doubly-linked list of LIR nodes requiring fixups.

 protected:
  CompilationUnit* const cu_;
  MIRGraph* const mir_graph_;
  GrowableArray<SwitchTable*> switch_tables_;
  GrowableArray<FillArrayData*> fill_array_data_;
  GrowableArray<RegisterInfo*> tempreg_info_;
  GrowableArray<RegisterInfo*> reginfo_map_;
  GrowableArray<void*> pointer_storage_;
  CodeOffset current_code_offset_;  // Working byte offset of machine instructions.
  CodeOffset data_offset_;          // Starting offset of the literal pool.
  size_t total_size_;               // Header + code size.
  LIR* block_label_list_;
  PromotionMap* promotion_map_;
  /*
   * TODO: The code generation utilities don't have a built-in
   * mechanism to propagate the original Dalvik opcode address to the
   * associated generated instructions. For the trace compiler, this wasn't
   * necessary because the interpreter handled all throws and debugging
   * requests. For now we'll handle this by placing the Dalvik offset
   * in the CompilationUnit struct before codegen for each instruction.
   * The low-level LIR creation utilities will pull it from here. Rework this.
   */
  DexOffset current_dalvik_offset_;
  size_t estimated_native_code_size_;  // Just an estimate; used to reserve code_buffer_ size.
  RegisterPool* reg_pool_;
  /*
   * Sanity checking for the register temp tracking. The same SSA
   * name should never be associated with more than one temp register
   * within the compilation of a single instruction.
   */
  int live_sreg_;
  CodeBuffer code_buffer_;
  // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
  std::vector<uint8_t> encoded_mapping_table_;
  std::vector<uint32_t> core_vmap_table_;
  std::vector<uint32_t> fp_vmap_table_;
  std::vector<uint8_t> native_gc_map_;
  int num_core_spills_;
  int num_fp_spills_;
  int frame_size_;
  unsigned int core_spill_mask_;
  unsigned int fp_spill_mask_;
  LIR* first_lir_insn_;
  LIR* last_lir_insn_;

  GrowableArray<LIRSlowPath*> slow_paths_;
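  // Illustrative sketch (not part of the original header): how the Dalvik-offset workaround
  // described in the TODO above is expected to be driven from the per-block codegen loop.
  // The loop is a simplification; CompileDalvikInstruction names a hypothetical per-opcode
  // dispatch step, and mir->offset is the MIR's dex offset.
  //
  //   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
  //     SetCurrentDexPc(mir->offset);  // LIR constructors read current_dalvik_offset_
  //     CompileDalvikInstruction(mir, bb, block_label_list_);
  //   }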
  // The memory reference type for new LIRs.
  // NOTE: Passing this as an explicit parameter through all functions that directly or
  // indirectly invoke RawLIR() would clutter the code and reduce readability.
  ResourceMask::ResourceBit mem_ref_type_;

  // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
  // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
  // (i.e. 8 bytes on a 32-bit arch, 16 bytes on a 64-bit arch) and we use ResourceMaskCache
  // to deduplicate the masks.
  ResourceMaskCache mask_cache_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_