mir_to_lir.h revision 5030d3ee8c6fe10394912ede107cbc8df63b7b16
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/reg_location.h"
#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "dex/quick/resource_mask.h"
#include "driver/compiler_driver.h"
#include "instruction_set.h"
#include "leb128.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/growable_array.h"

namespace art {

/*
 * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
 * add type safety (see runtime/offsets.h).
 */
typedef uint32_t DexOffset;        // Dex offset in code units.
typedef uint16_t NarrowDexOffset;  // For use in structs, Dex offsets range from 0 .. 0xffff.
typedef uint32_t CodeOffset;       // Native code offset in bytes.

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp)
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
#define REG_DEF1 (1ULL << kRegDef1)
#define REG_DEF2 (1ULL << kRegDef2)
#define REG_DEFA (1ULL << kRegDefA)
#define REG_DEFD (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0 (1ULL << kRegDefList0)
#define REG_DEF_LIST1 (1ULL << kRegDefList1)
#define REG_DEF_LR (1ULL << kRegDefLR)
#define REG_DEF_SP (1ULL << kRegDefSP)
#define REG_USE0 (1ULL << kRegUse0)
#define REG_USE1 (1ULL << kRegUse1)
#define REG_USE2 (1ULL << kRegUse2)
#define REG_USE3 (1ULL << kRegUse3)
#define REG_USE4 (1ULL << kRegUse4)
#define REG_USEA (1ULL << kRegUseA)
#define REG_USEC (1ULL << kRegUseC)
#define REG_USED (1ULL << kRegUseD)
#define REG_USEB (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0 (1ULL << kRegUseList0)
#define REG_USE_LIST1 (1ULL << kRegUseList1)
#define REG_USE_LR (1ULL << kRegUseLR)
#define REG_USE_PC (1ULL << kRegUsePC)
#define REG_USE_SP (1ULL << kRegUseSP)
#define SETS_CCODES (1ULL << kSetsCCodes)
#define USES_CCODES (1ULL << kUsesCCodes)
#define USE_FP_STACK (1ULL << kUseFpStack)
#define REG_USE_LO (1ULL << kUseLo)
#define REG_USE_HI (1ULL << kUseHi)
#define REG_DEF_LO (1ULL << kDefLo)
#define REG_DEF_HI (1ULL << kDefHi)
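// Example (illustrative): a three-operand instruction that writes operand 0 and reads
// operands 1 and 2 would carry IS_TERTIARY_OP | REG_DEF0 | REG_USE12 (see the combo
// patterns below) in its encoding-map flags.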

// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
#define REG_DEF012 (REG_DEF0 | REG_DEF1 | REG_DEF2)
#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123 (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA (REG_DEFA | REG_USEA)
#define REG_USE012 (REG_USE01 | REG_USE2)
#define REG_USE014 (REG_USE01 | REG_USE4)
#define REG_USE01 (REG_USE0 | REG_USE1)
#define REG_USE02 (REG_USE0 | REG_USE2)
#define REG_USE12 (REG_USE1 | REG_USE2)
#define REG_USE23 (REG_USE2 | REG_USE3)
#define REG_USE123 (REG_USE1 | REG_USE2 | REG_USE3)

// TODO: #includes need a cleanup
#ifndef INVALID_SREG
#define INVALID_SREG (-1)
#endif

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
struct MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef std::vector<uint8_t> CodeBuffer;

struct UseDefMasks {
  const ResourceMask* use_mask;  // Resource mask for use.
  const ResourceMask* def_mask;  // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;  // Chain of LIR nodes needing pc relative fixups.
};

struct LIR {
  CodeOffset offset;              // Offset of this instruction.
  NarrowDexOffset dalvik_offset;  // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;  // For Dalvik register disambiguation.
    bool is_nop:1;               // LIR is optimized away.
    unsigned int size:4;         // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;      // If true, masks should not be used.
    unsigned int generation:1;   // Used to track visitation state during fixup pass.
    unsigned int fixup:8;        // Fixup kind.
  } flags;
  union {
    UseDefMasks m;               // Use & Def masks used during optimization.
    AssemblyInfo a;              // Instruction info used during assembly phase.
  } u;
  int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
};

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                            ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X) ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE) (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))

#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
  do { \
    low_reg = both_regs & 0xff; \
    high_reg = (both_regs >> 8) & 0xff; \
  } while (false)
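// Worked example (illustrative values): for Dalvik register v5 holding the low half of a
// wide value, ENCODE_ALIAS_INFO(5, true) yields 0x10005; DECODE_ALIAS_INFO_REG(0x10005)
// recovers 5 and DECODE_ALIAS_INFO_WIDE(0x10005) yields 1. Likewise, ENCODE_REG_PAIR(1, 2)
// packs to 0x0201, which DECODE_REG_PAIR unpacks back into low_reg == 1, high_reg == 2.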
// Mask to denote sreg as the start of a 64-bit item. Must not interfere with low 16 bits.
#define STARTING_WIDE_SREG 0x10000

// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))

// Size of a frame that we definitely consider large. Anything larger than this should
// definitely get a stack overflow check.
static constexpr size_t kLargeFrameSize = 2 * KB;

// Size of a frame that should be small. Any leaf method smaller than this should run
// without a stack overflow check.
// The constant is from experience with frameworks code.
static constexpr size_t kSmallFrameSize = 1 * KB;

// Determine whether a frame is small or large, used in the decision on whether to elide a
// stack overflow check on method entry.
//
// A frame is considered large when it's either above kLargeFrameSize, or a quarter of the
// overflow-usable stack space.
static constexpr bool IsLargeFrame(size_t size, InstructionSet isa) {
  return size >= kLargeFrameSize || size >= GetStackOverflowReservedBytes(isa) / 4;
}

// We want to ensure that on all systems kSmallFrameSize will lead to false in IsLargeFrame.
COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kArm),
               kSmallFrameSize_is_not_a_small_frame_arm);
COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kArm64),
               kSmallFrameSize_is_not_a_small_frame_arm64);
COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kMips),
               kSmallFrameSize_is_not_a_small_frame_mips);
COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kX86),
               kSmallFrameSize_is_not_a_small_frame_x86);
COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kX86_64),
               kSmallFrameSize_is_not_a_small_frame_x86_64);
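// Worked example (illustrative numbers; the reserved size is target-specific): on an ISA
// reserving 16KB of overflow-usable stack, a 4KB frame is large by both bounds
// (4KB >= kLargeFrameSize, and 4KB >= 16KB / 4), while the 1KB kSmallFrameSize trips
// neither bound, which is what the COMPILE_ASSERTs above verify for each ISA.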
class Mir2Lir : public Backend {
 public:
  static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
  static constexpr bool kReportSizeError = true && kIsDebugBuild;

  /*
   * Auxiliary information describing the location of data embedded in the Dalvik
   * byte code stream.
   */
  struct EmbeddedData {
    CodeOffset offset;      // Code offset of data block.
    const uint16_t* table;  // Original dex data.
    DexOffset vaddr;        // Dalvik offset of parent opcode.
  };

  struct FillArrayData : EmbeddedData {
    int32_t size;
  };

  struct SwitchTable : EmbeddedData {
    LIR* anchor;    // Reference instruction for relative offsets.
    LIR** targets;  // Array of case targets.
  };

  /* Static register use counts */
  struct RefCounts {
    int count;
    int s_reg;
  };

  /*
   * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
   * and native register storage. The primary purpose is to reuse previously
   * loaded values, if possible, and otherwise to keep the value in register
   * storage as long as possible.
   *
   * NOTE 1: wide_value refers to the width of the Dalvik value contained in
   * this register (or pair). For example, a 64-bit register containing a 32-bit
   * Dalvik value would have wide_value==false even though the storage container itself
   * is wide. Similarly, a 32-bit register containing half of a 64-bit Dalvik value
   * would have wide_value==true (and additionally would have its partner field set to the
   * other half, whose wide_value field would also be true).
   *
   * NOTE 2: In the case of a register pair, you can determine which of the partners
   * is the low half by looking at the s_reg names. The high s_reg will equal low_sreg + 1.
   *
   * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
   * will be true and partner==self. s_reg refers to the low-order word of the Dalvik
   * value, and the s_reg of the high word is implied (s_reg + 1).
   *
   * NOTE 4: The reg and is_temp fields should always be correct. If is_temp is false no
   * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
   * If is_temp==true and live==false, no other fields have
   * meaning. If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
   * and def_end describe the relationship between the temp register/register pair and
   * the Dalvik value[s] described by s_reg/s_reg+1.
   *
   * The fields used_storage, master_storage and storage_mask are used to track allocation
   * in light of potential aliasing. For example, consider Arm's d2, which overlaps s4 & s5.
   * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
   * storage use. For s4, it would be 0x00000001; for s5, 0x00000002. These values should not
   * change once initialized. The "used_storage" field tracks current allocation status.
   * Although each record contains this field, only the field from the largest member of
   * an aliased group is used. In our case, it would be d2's. The master_storage pointer
   * of d2, s4 and s5 would all point to d2's used_storage field. Each bit in a used_storage
   * represents 32 bits of storage. d2's used_storage would be initialized to 0xfffffffc.
   * Then, if we wanted to determine whether s4 could be allocated, we would "and"
   * s4's storage_mask with s4's *master_storage. If the result is zero, s4 is free;
   * to allocate it: *master_storage |= storage_mask. To free: *master_storage &= ~storage_mask.
   *
   * For an X86 vector register example, storage_mask would be:
   *    0x00000001 for 32-bit view of xmm1
   *    0x00000003 for 64-bit view of xmm1
   *    0x0000000f for 128-bit view of xmm1
   *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
   *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
   *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
   *
   * The "liveness" of a register is handled in a similar way. The liveness_ storage is
   * held in the widest member of an aliased set.
   * Note, though, that for a temp register to be
   * reused as live, it must both be marked live and the associated SReg() must match the
   * desired s_reg. This gets a little complicated when dealing with aliased registers. All
   * members of an aliased set will share the same liveness flags, but each will individually
   * maintain s_reg_. In this way we can know that at least one member of an
   * aliased set is live, but will only fully match on the appropriate alias view. For example,
   * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9
   * because it is wide), its aliases s2 and s3 will show as live, but will have
   * s_reg_ == INVALID_SREG. An attempt to later AllocLiveReg() of v9 with a single-precision
   * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
   * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
   * report that v9 is currently not live as a single (which is what we want).
   *
   * NOTE: the x86 usage is still somewhat in flux. There are competing notions of how
   * to treat xmm registers:
   *    1. Treat them all as 128-bits wide, but denote how much data used via bytes field.
   *        o This more closely matches reality, but means you'd need to be able to get
   *          to the associated RegisterInfo struct to figure out how it's being used.
   *        o This is how 64-bit core registers will be used - always 64 bits, but the
   *          "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
   *    2. View the xmm registers based on contents.
   *        o A single in a xmm2 register would be k32BitVector, while a double in xmm2 would
   *          be a k64BitVector.
   *        o Note that the two uses above would be considered distinct registers (but with
   *          the aliasing mechanism, we could detect interference).
   *        o This is how aliased double and single float registers will be handled on
   *          Arm and MIPS.
   * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
   * mechanism 2 for aliased float registers and x86 vector registers.
   */
  class RegisterInfo {
   public:
    RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
    ~RegisterInfo() {}
    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kArenaAllocRegAlloc);
    }

    static const uint32_t k32SoloStorageMask = 0x00000001;
    static const uint32_t kLowSingleStorageMask = 0x00000001;
    static const uint32_t kHighSingleStorageMask = 0x00000002;
    static const uint32_t k64SoloStorageMask = 0x00000003;
    static const uint32_t k128SoloStorageMask = 0x0000000f;
    static const uint32_t k256SoloStorageMask = 0x000000ff;
    static const uint32_t k512SoloStorageMask = 0x0000ffff;
    static const uint32_t k1024SoloStorageMask = 0xffffffff;

    bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
    void MarkInUse() { master_->used_storage_ |= storage_mask_; }
    void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
    // No part of the containing storage is live in this view.
    bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
    // Liveness of this view matches. Note: not equivalent to !IsDead().
    bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
    void MarkLive(int s_reg) {
      // TODO: Anything useful to assert here?
      s_reg_ = s_reg;
      master_->liveness_ |= storage_mask_;
    }
    void MarkDead() {
      if (SReg() != INVALID_SREG) {
        s_reg_ = INVALID_SREG;
        master_->liveness_ &= ~storage_mask_;
        ResetDefBody();
      }
    }
    RegStorage GetReg() { return reg_; }
    void SetReg(RegStorage reg) { reg_ = reg; }
    bool IsTemp() { return is_temp_; }
    void SetIsTemp(bool val) { is_temp_ = val; }
    bool IsWide() { return wide_value_; }
    void SetIsWide(bool val) {
      wide_value_ = val;
      if (!val) {
        // If not wide, reset partner to self.
        SetPartner(GetReg());
      }
    }
    bool IsDirty() { return dirty_; }
    void SetIsDirty(bool val) { dirty_ = val; }
    RegStorage Partner() { return partner_; }
    void SetPartner(RegStorage partner) { partner_ = partner; }
    int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
    const ResourceMask& DefUseMask() { return def_use_mask_; }
    void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
    RegisterInfo* Master() { return master_; }
    void SetMaster(RegisterInfo* master) {
      master_ = master;
      if (master != this) {
        master_->aliased_ = true;
        DCHECK(alias_chain_ == nullptr);
        alias_chain_ = master_->alias_chain_;
        master_->alias_chain_ = this;
      }
    }
    bool IsAliased() { return aliased_; }
    RegisterInfo* GetAliasChain() { return alias_chain_; }
    uint32_t StorageMask() { return storage_mask_; }
    void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
    LIR* DefStart() { return def_start_; }
    void SetDefStart(LIR* def_start) { def_start_ = def_start; }
    LIR* DefEnd() { return def_end_; }
    void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
    void ResetDefBody() { def_start_ = def_end_ = nullptr; }
    // Find member of aliased set matching storage_used; return nullptr if none.
    RegisterInfo* FindMatchingView(uint32_t storage_used) {
      RegisterInfo* res = Master();
      for (; res != nullptr; res = res->GetAliasChain()) {
        if (res->StorageMask() == storage_used) {
          break;
        }
      }
      return res;
    }

   private:
    RegStorage reg_;
    bool is_temp_;               // Can allocate as temp?
    bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
    bool dirty_;                 // If live, is it dirty?
    bool aliased_;               // Is this the master for other aliased RegisterInfo's?
    RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
    int s_reg_;                  // Name of live value.
    ResourceMask def_use_mask_;  // Resources for this element.
    uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
    uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
    RegisterInfo* master_;       // Pointer to controlling storage mask.
    uint32_t storage_mask_;      // Track allocation of sub-units.
    LIR *def_start_;             // Starting inst in last def sequence.
    LIR *def_end_;               // Ending inst in last def sequence.
    RegisterInfo* alias_chain_;  // Chain of aliased registers.
  };
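  // Worked example (illustrative, following the d2/s4/s5 case described above): d2 is the
  // master with StorageMask() == k64SoloStorageMask (0x3), while s4 and s5 carry
  // kLowSingleStorageMask (0x1) and kHighSingleStorageMask (0x2). Calling MarkInUse() on s4
  // sets bit 0 of d2's allocation bits, so InUse() then reports true for both s4 and d2
  // (their masks overlap bit 0) but still false for s5 (mask 0x2 stays clear).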
  class RegisterPool {
   public:
    RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
                 const ArrayRef<const RegStorage>& core_regs,
                 const ArrayRef<const RegStorage>& core64_regs,
                 const ArrayRef<const RegStorage>& sp_regs,
                 const ArrayRef<const RegStorage>& dp_regs,
                 const ArrayRef<const RegStorage>& reserved_regs,
                 const ArrayRef<const RegStorage>& reserved64_regs,
                 const ArrayRef<const RegStorage>& core_temps,
                 const ArrayRef<const RegStorage>& core64_temps,
                 const ArrayRef<const RegStorage>& sp_temps,
                 const ArrayRef<const RegStorage>& dp_temps);
    ~RegisterPool() {}
    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kArenaAllocRegAlloc);
    }
    void ResetNextTemp() {
      next_core_reg_ = 0;
      next_sp_reg_ = 0;
      next_dp_reg_ = 0;
    }
    GrowableArray<RegisterInfo*> core_regs_;
    int next_core_reg_;
    GrowableArray<RegisterInfo*> core64_regs_;
    int next_core64_reg_;
    GrowableArray<RegisterInfo*> sp_regs_;    // Single precision float.
    int next_sp_reg_;
    GrowableArray<RegisterInfo*> dp_regs_;    // Double precision float.
    int next_dp_reg_;
    GrowableArray<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_.
    int* next_ref_reg_;

   private:
    Mir2Lir* const m2l_;
  };

  struct PromotionMap {
    RegLocationType core_location:3;
    uint8_t core_reg;
    RegLocationType fp_location:3;
    uint8_t fp_reg;
    bool first_in_pair;
  };

  //
  // Slow paths. This object is used to generate a sequence of code that is executed in the
  // slow path. For example, resolving a string or class is slow as it will only be executed
  // once (after that it is resolved and doesn't need to be done again). We want slow paths
  // to be placed out-of-line, and not require a (mispredicted, probably) conditional forward
  // branch over them.
  //
  // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
  // the Compile() function that will be called near the end of the code generated by the
  // method.
  //
  // The basic flow for a slow path is:
  //
  //           CMP reg, #value
  //           BEQ fromfast
  //   cont:
  //           ...
  //           fast path code
  //           ...
  //           more code
  //           ...
  //           RETURN
  //
  //   fromfast:
  //           ...
  //           slow path code
  //           ...
  //           B cont
  //
  // So you see we need two labels and two branches. The first branch (called fromfast) is
  // the conditional branch to the slow path code. The second label (called cont) is used
  // as an unconditional branch target for getting back to the code after the slow path
  // has completed.
  //
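  // A minimal subclass sketch (illustrative only; the class name and emitted code are
  // hypothetical, not part of this file):
  //
  //   class DivZeroCheckSlowPath : public LIRSlowPath {
  //    public:
  //     DivZeroCheckSlowPath(Mir2Lir* m2l, DexOffset dexpc, LIR* fromfast)
  //         : LIRSlowPath(m2l, dexpc, fromfast) {}
  //     void Compile() {
  //       GenerateTargetLabel();  // Emits the "fromfast" label.
  //       // ... emit the out-of-line throw/helper-call code here ...
  //     }
  //   };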
  class LIRSlowPath {
   public:
    LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                LIR* cont = nullptr) :
      m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
      m2l->StartSlowPath(this);
    }
    virtual ~LIRSlowPath() {}
    virtual void Compile() = 0;

    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kArenaAllocData);
    }

    LIR *GetContinuationLabel() {
      return cont_;
    }

    LIR *GetFromFast() {
      return fromfast_;
    }

   protected:
    LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);

    Mir2Lir* const m2l_;
    CompilationUnit* const cu_;
    const DexOffset current_dex_pc_;
    LIR* const fromfast_;
    LIR* const cont_;
  };

  // Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_.
  class ScopedMemRefType {
   public:
    ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
        : m2l_(m2l),
          old_mem_ref_type_(m2l->mem_ref_type_) {
      m2l_->mem_ref_type_ = new_mem_ref_type;
    }

    ~ScopedMemRefType() {
      m2l_->mem_ref_type_ = old_mem_ref_type_;
    }

   private:
    Mir2Lir* const m2l_;
    ResourceMask::ResourceBit old_mem_ref_type_;

    DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
  };

  virtual ~Mir2Lir() {}

  int32_t s4FromSwitchData(const void* switch_data) {
    return *reinterpret_cast<const int32_t*>(switch_data);
  }

  /*
   * TODO: this is a trace JIT vestige, and its use should be reconsidered. At the time
   * it was introduced, it was intended to be a quick best guess of type without having to
   * take the time to do type analysis. Currently, though, we have a much better idea of
   * the types of Dalvik virtual registers. Instead of using this for a best guess, why not
   * just use our knowledge of type to select the most appropriate register class?
   */
  RegisterClass RegClassBySize(OpSize size) {
    if (size == kReference) {
      return kRefReg;
    } else {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }
  }

  size_t CodeBufferSizeInBytes() {
    return code_buffer_.size() / sizeof(code_buffer_[0]);
  }

  static bool IsPseudoLirOp(int opcode) {
    return (opcode < 0);
  }

  /*
   * LIR operands are 32-bit integers. Sometimes (especially for managing
   * instructions which require PC-relative fixups), we need the operands to carry
   * pointers. To do this, we assign these pointers an index in pointer_storage_, and
   * hold that index in the operand array.
   * TUNING: If use of these utilities becomes more common on 32-bit builds, it
   * may be worth conditionally-compiling a set of identity functions here.
   */
  uint32_t WrapPointer(void* pointer) {
    uint32_t res = pointer_storage_.Size();
    pointer_storage_.Insert(pointer);
    return res;
  }

  void* UnwrapPointer(size_t index) {
    return pointer_storage_.Get(index);
  }
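  // Usage sketch (illustrative): an LIR that needs to reference a SwitchTable could store
  // operands[1] = WrapPointer(tab_rec) when it is emitted, and a later pass would recover
  // the pointer with reinterpret_cast<SwitchTable*>(UnwrapPointer(lir->operands[1])).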
  // strdup(), but allocates from the arena.
  char* ArenaStrdup(const char* str) {
    size_t len = strlen(str) + 1;
    char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
    if (res != NULL) {
      strncpy(res, str, len);
    }
    return res;
  }

  // Shared by all targets - implemented in codegen_util.cc
  void AppendLIR(LIR* lir);
  void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
  void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

  /**
   * @brief Provides the maximum number of compiler temporaries that the backend can/wants
   * to place in a frame.
   * @return Returns the maximum number of compiler temporaries.
   */
  size_t GetMaxPossibleCompilerTemps() const;

  /**
   * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
   * @return Returns the size in bytes for space needed for compiler temporary spill region.
   */
  size_t GetNumBytesForCompilerTempSpillRegion();

  DexOffset GetCurrentDexPc() const {
    return current_dalvik_offset_;
  }

  RegisterClass ShortyToRegClass(char shorty_type);
  RegisterClass LocToRegClass(RegLocation loc);
  int ComputeFrameSize();
  virtual void Materialize();
  virtual CompiledMethod* GetCompiledMethod();
  void MarkSafepointPC(LIR* inst);
  void MarkSafepointPCAfter(LIR* after);
  void SetupResourceMasks(LIR* lir);
  void SetMemRefType(LIR* lir, bool is_load, int mem_type);
  void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
  void SetupRegMask(ResourceMask* mask, int reg);
  void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
  void DumpPromotionMap();
  void CodegenDump();
  LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
              int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
  LIR* NewLIR0(int opcode);
  LIR* NewLIR1(int opcode, int dest);
  LIR* NewLIR2(int opcode, int dest, int src1);
  LIR* NewLIR2NoDest(int opcode, int src, int info);
  LIR* NewLIR3(int opcode, int dest, int src1, int src2);
  LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
  LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
  LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
  LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
  LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
  LIR* AddWordData(LIR* *constant_list_p, int value);
  LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
  void ProcessSwitchTables();
  void DumpSparseSwitchTable(const uint16_t* table);
  void DumpPackedSwitchTable(const uint16_t* table);
  void MarkBoundary(DexOffset offset, const char* inst_str);
  void NopLIR(LIR* lir);
  void UnlinkLIR(LIR* lir);
  bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
  bool IsInexpensiveConstant(RegLocation rl_src);
  ConditionCode FlipComparisonOrder(ConditionCode before);
  ConditionCode NegateComparison(ConditionCode before);
  virtual void InstallLiteralPools();
  void InstallSwitchTables();
  void InstallFillArrayData();
  bool VerifyCatchEntries();
  void CreateMappingTables();
  void CreateNativeGcMap();
  int AssignLiteralOffset(CodeOffset offset);
  int AssignSwitchTablesOffset(CodeOffset offset);
  int AssignFillArrayDataOffset(CodeOffset offset);
  virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
  void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
  void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);

  virtual void StartSlowPath(LIRSlowPath* slowpath) {}
  virtual void BeginInvoke(CallInfo* info) {}
  virtual void EndInvoke(CallInfo* info) {}

  // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
  virtual RegLocation NarrowRegLoc(RegLocation loc);

  // Shared by all targets - implemented in local_optimizations.cc
  void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
  void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
  void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
  virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);

  // Shared by all targets - implemented in ralloc_util.cc
  int GetSRegHi(int lowSreg);
  bool LiveOut(int s_reg);
  void SimpleRegAlloc();
  void ResetRegPool();
  void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
  void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
  void DumpCoreRegPool();
  void DumpFpRegPool();
  void DumpRegPools();
  /* Mark a temp register as dead. Does not affect allocation state. */
  void Clobber(RegStorage reg);
  void ClobberSReg(int s_reg);
  void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
  int SRegToPMap(int s_reg);
  void RecordCorePromotion(RegStorage reg, int s_reg);
  RegStorage AllocPreservedCoreReg(int s_reg);
  void RecordFpPromotion(RegStorage reg, int s_reg);
  RegStorage AllocPreservedFpReg(int s_reg);
  virtual RegStorage AllocPreservedSingle(int s_reg);
  virtual RegStorage AllocPreservedDouble(int s_reg);
  RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
  virtual RegStorage AllocFreeTemp();
  virtual RegStorage AllocTemp();
  virtual RegStorage AllocTempWide();
  virtual RegStorage AllocTempRef();
  virtual RegStorage AllocTempSingle();
  virtual RegStorage AllocTempDouble();
  virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
  virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
  void FlushReg(RegStorage reg);
  void FlushRegWide(RegStorage reg);
  RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
  RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
  virtual void FreeTemp(RegStorage reg);
  virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
  virtual bool IsLive(RegStorage reg);
  virtual bool IsTemp(RegStorage reg);
  bool IsPromoted(RegStorage reg);
  bool IsDirty(RegStorage reg);
  virtual void LockTemp(RegStorage reg);
  void ResetDef(RegStorage reg);
  void NullifyRange(RegStorage reg, int s_reg);
  void MarkDef(RegLocation rl, LIR *start, LIR *finish);
  void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
  void ResetDefLoc(RegLocation rl);
  void ResetDefLocWide(RegLocation rl);
  void ResetDefTracking();
  void ClobberAllTemps();
  void FlushSpecificReg(RegisterInfo* info);
  void FlushAllRegs();
  bool RegClassMatches(int reg_class, RegStorage reg);
  void MarkLive(RegLocation loc);
  void MarkTemp(RegStorage reg);
  void UnmarkTemp(RegStorage reg);
  void MarkWide(RegStorage reg);
  void MarkNarrow(RegStorage reg);
  void MarkClean(RegLocation loc);
  void MarkDirty(RegLocation loc);
  void MarkInUse(RegStorage reg);
  bool CheckCorePoolSanity();
  virtual RegLocation UpdateLoc(RegLocation loc);
  virtual RegLocation UpdateLocWide(RegLocation loc);
  RegLocation UpdateRawLoc(RegLocation loc);

  /**
   * @brief Used to prepare a register location to receive a wide value.
   * @see EvalLoc
   * @param loc the location where the value will be stored.
   * @param reg_class Type of register needed.
   * @param update Whether the liveness information should be updated.
   * @return Returns the properly typed temporary in physical register pairs.
   */
  virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);

  /**
   * @brief Used to prepare a register location to receive a value.
   * @param loc the location where the value will be stored.
   * @param reg_class Type of register needed.
   * @param update Whether the liveness information should be updated.
   * @return Returns the properly typed temporary in physical register.
   */
  virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);

  void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
  void DumpCounts(const RefCounts* arr, int size, const char* msg);
  void DoPromotion();
  int VRegOffset(int v_reg);
  int SRegOffset(int s_reg);
  RegLocation GetReturnWide(RegisterClass reg_class);
  RegLocation GetReturn(RegisterClass reg_class);
  RegisterInfo* GetRegInfo(RegStorage reg);

  // Shared by all targets - implemented in gen_common.cc.
  void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
  virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                RegLocation rl_src, RegLocation rl_dest, int lit);
  bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
  virtual void HandleSlowPaths();
  void GenBarrier();
  void GenDivZeroException();
  // c_code holds condition code that's generated from testing divisor against 0.
  void GenDivZeroCheck(ConditionCode c_code);
  // reg holds divisor.
  void GenDivZeroCheck(RegStorage reg);
  void GenArrayBoundsCheck(RegStorage index, RegStorage length);
  void GenArrayBoundsCheck(int32_t index, RegStorage length);
  LIR* GenNullCheck(RegStorage reg);
  void MarkPossibleNullPointerException(int opt_flags);
  void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
  void MarkPossibleStackOverflowException();
  void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
  LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
  LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
  LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
  virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
  void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                           RegLocation rl_src2, LIR* taken, LIR* fall_through);
  void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                               LIR* taken, LIR* fall_through);
  virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
  void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                       RegLocation rl_src);
  void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                   RegLocation rl_src);
  void GenFilledNewArray(CallInfo* info);
  void GenSput(MIR* mir, RegLocation rl_src,
               bool is_long_or_double, bool is_object);
  void GenSget(MIR* mir, RegLocation rl_dest,
               bool is_long_or_double, bool is_object);
  void GenIGet(MIR* mir, int opt_flags, OpSize size,
               RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
  void GenIPut(MIR* mir, int opt_flags, OpSize size,
               RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
  void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                      RegLocation rl_src);

  void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
  void GenConstString(uint32_t string_idx, RegLocation rl_dest);
  void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
  void GenThrow(RegLocation rl_src);
  void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
  void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
  void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                    RegLocation rl_src1, RegLocation rl_src2);
  virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift);
  void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src, int lit);
  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
  template <size_t pointer_size>
  void GenConversionCall(ThreadOffset<pointer_size> func_offset, RegLocation rl_dest,
                         RegLocation rl_src);
  virtual void GenSuspendTest(int opt_flags);
  virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);

  // This will be overridden by x86 implementation.
  virtual void GenConstWide(RegLocation rl_dest, int64_t value);
  virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2);

  // Shared by all targets - implemented in gen_invoke.cc.
  template <size_t pointer_size>
  LIR* CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset, bool safepoint_pc,
                  bool use_link = true);
  RegStorage CallHelperSetup(ThreadOffset<4> helper_offset);
  RegStorage CallHelperSetup(ThreadOffset<8> helper_offset);
  template <size_t pointer_size>
  void CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
                            bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                            bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
                                    bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
                               bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
                                       RegLocation arg1, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
                                       int arg1, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0, RegStorage arg1,
                               bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, int arg1,
                               bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
                                  bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                  bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
                                             RegStorage arg0, RegLocation arg2, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
                                               RegLocation arg0, RegLocation arg1,
                                               bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                               RegStorage arg1, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                  RegStorage arg1, int arg2, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
                                             RegLocation arg2, bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg2,
                                     bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
                                                  int arg0, RegLocation arg1, RegLocation arg2,
                                                  bool safepoint_pc);
  template <size_t pointer_size>
  void CallRuntimeHelperRegLocationRegLocationRegLocation(
      ThreadOffset<pointer_size> helper_offset, RegLocation arg0, RegLocation arg1,
      RegLocation arg2, bool safepoint_pc);
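  // Usage sketch (illustrative, assuming the runtime's QUICK_ENTRYPOINT_OFFSET macro): on a
  // target with 4-byte Thread* entrypoints, a throw could be lowered as
  //   CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
  // where the trailing 'true' requests a safepoint PC record after the call.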
  void GenInvoke(CallInfo* info);
  void GenInvokeNoInline(CallInfo* info);
  virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
  virtual int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                   NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx,
                                   uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this);
  virtual int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                 NextCallInsn next_call_insn,
                                 const MethodReference& target_method,
                                 uint32_t vtable_idx,
                                 uintptr_t direct_code, uintptr_t direct_method,
                                 InvokeType type, bool skip_this);

  /**
   * @brief Used to determine the register location of destination.
   * @details This is needed during generation of inline intrinsics because it finds the
   * destination of return, either the physical register or the target of move-result.
   * @param info Information about the invoke.
   * @return Returns the destination location.
   */
  RegLocation InlineTarget(CallInfo* info);

  /**
   * @brief Used to determine the wide register location of destination.
   * @see InlineTarget
   * @param info Information about the invoke.
   * @return Returns the destination location.
   */
  RegLocation InlineTargetWide(CallInfo* info);

  bool GenInlinedGet(CallInfo* info);
  bool GenInlinedCharAt(CallInfo* info);
  bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
  virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
  bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
  bool GenInlinedAbsInt(CallInfo* info);
  virtual bool GenInlinedAbsLong(CallInfo* info);
  virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
  virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
  bool GenInlinedFloatCvt(CallInfo* info);
  bool GenInlinedDoubleCvt(CallInfo* info);
  virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
  virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
  bool GenInlinedStringCompareTo(CallInfo* info);
  bool GenInlinedCurrentThread(CallInfo* info);
  bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
  bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                           bool is_volatile, bool is_ordered);
  virtual int LoadArgRegs(CallInfo* info, int call_state,
                          NextCallInsn next_call_insn,
                          const MethodReference& target_method,
                          uint32_t vtable_idx,
                          uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                          bool skip_this);

  // Shared by all targets - implemented in gen_loadstore.cc.
  RegLocation LoadCurrMethod();
  void LoadCurrMethodDirect(RegStorage r_tgt);
  virtual LIR* LoadConstant(RegStorage r_dest, int value);
  // Natural word size.
  virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
    return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
  }
  // Load 32 bits, regardless of target.
  virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
    return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
  }
  // Load a reference at base + displacement and decompress into register.
  virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                           VolatileKind is_volatile) {
    return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
  }
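  // Note (illustrative): on a 64-bit target, LoadWordDisp() above emits a 64-bit load (kWord
  // is the natural word size) while Load32Disp() always emits a 32-bit load; both simply
  // dispatch to the target-provided LoadBaseDisp() with a different OpSize.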
  // Load a reference at base + index and decompress into register.
  virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                              int scale) {
    return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
  }
  // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
  virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
  // Same as above, but derive the target register class from the location record.
  virtual RegLocation LoadValue(RegLocation rl_src);
  // Load Dalvik value with 64-bit memory storage.
  virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
  // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
  virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
  virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 64-bit memory storage.
  virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 64-bit memory storage.
  virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
  // Store an item of natural word size.
  virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
    return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
  }
  // Store an uncompressed reference into a compressed 32-bit container.
  virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
                            VolatileKind is_volatile) {
    return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
  }
  // Store an uncompressed reference into a compressed 32-bit container by index.
  virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                               int scale) {
    return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
  }
  // Store 32 bits, regardless of target.
  virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
    return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
  }

  /**
   * @brief Used to do the final store in the destination as per bytecode semantics.
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. Can be either physical register or dalvik
   * register.
   */
  virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store in a wide destination as per bytecode semantics.
   * @see StoreValue
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. Can be either physical register or dalvik
   * register.
   */
  virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store to a destination as per bytecode semantics.
   * @see StoreValue
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. It must be kLocPhysReg.
   *
   * This is used for x86 two operand computations, where we have computed the correct
   * register value that now needs to be properly registered. This is used to avoid an
   * extra register copy that would result if StoreValue was called.
   */
  virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store in a wide destination as per bytecode semantics.
   * @see StoreValueWide
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. It must be kLocPhysReg.
   *
   * This is used for x86 two operand computations, where we have computed the correct
   * register values that now need to be properly registered. This is used to avoid an
   * extra pair of register copies that would result if StoreValueWide was called.
   */
  virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);

  // Shared by all targets - implemented in mir_to_lir.cc.
  void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
  virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
  bool MethodBlockCodeGen(BasicBlock* bb);
  bool SpecialMIR2LIR(const InlineMethod& special);
  virtual void MethodMIR2LIR();
  // Update LIR for verbose listings.
  void UpdateLIROffsets();

  /*
   * @brief Load the address of the dex method into the register.
   * @param target_method The MethodReference of the method to be invoked.
   * @param type How the method will be invoked.
   * @param symbolic_reg The register that will contain the code address.
   * @note symbolic_reg will be passed to TargetReg to get the physical register.
   */
  void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                       SpecialTargetRegister symbolic_reg);

  /*
   * @brief Load the Method* of a dex method into the register.
   * @param target_method The MethodReference of the method to be invoked.
   * @param type How the method will be invoked.
   * @param symbolic_reg The register that will contain the method address.
   * @note symbolic_reg will be passed to TargetReg to get the physical register.
   */
  virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                 SpecialTargetRegister symbolic_reg);

  /*
   * @brief Load the Class* of a Dex Class type into the register.
   * @param type_idx The index of the class type in the dex file.
   * @param symbolic_reg The register that will contain the class address.
   * @note symbolic_reg will be passed to TargetReg to get the physical register.
   */
  virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);

  // Routines that work for the generic case, but may be overridden by target.
  /*
   * @brief Compare memory to immediate, and branch if condition true.
   * @param cond The condition code that when true will branch to the target.
   * @param temp_reg A temporary register that can be used if compare to memory is not
   * supported by the architecture.
   * @param base_reg The register holding the base address.
   * @param offset The offset from the base.
   * @param check_value The immediate to compare to.
   * @param target branch target (or nullptr)
   * @param compare output for getting LIR for comparison (or nullptr)
   * @returns The branch instruction that was generated.
   */
  virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                 int offset, int check_value, LIR* target, LIR** compare);

  // Required for target - codegen helpers.
  virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                  RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
  virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
  virtual LIR* CheckSuspendUsingLoad() = 0;

  virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
  virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0;

  virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                            OpSize size, VolatileKind is_volatile) = 0;
  virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                               int scale, OpSize size) = 0;
  virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                   int displacement, RegStorage r_dest, OpSize size) = 0;
  virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
  virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
  virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                             OpSize size, VolatileKind is_volatile) = 0;
  virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                int scale, OpSize size) = 0;
  virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                    int displacement, RegStorage r_src, OpSize size) = 0;
  virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;

  // Required for target - register utilities.

  bool IsSameReg(RegStorage reg1, RegStorage reg2) {
    RegisterInfo* info1 = GetRegInfo(reg1);
    RegisterInfo* info2 = GetRegInfo(reg2);
    return (info1->Master() == info2->Master() &&
            (info1->StorageMask() & info2->StorageMask()) != 0);
  }

  /**
   * @brief Portable way of getting special registers from the backend.
   * @param reg Enumeration describing the purpose of the register.
   * @return Return the #RegStorage corresponding to the given purpose @p reg.
   * @note This function is currently allowed to return any suitable view of the registers
   *       (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
   */
  virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;
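  // Example (illustrative): on a 32-bit target, TargetReg(kArg0, kWide) below returns the
  // pair (kArg0, kArg1) built via RegStorage::MakeRegPair(), relying on the enum adjacency
  // checked by the COMPILE_ASSERTs; TargetReg(kArg0, kNotWide) returns the solo register.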
  /**
   * @brief Portable way of getting special registers from the backend.
   * @param reg Enumeration describing the purpose of the register.
   * @param wide_kind What kind of view of the special register is required.
   * @return Return the #RegStorage corresponding to the given purpose @p reg.
   *
   * @note For a 32-bit target, wide (kWide) views only make sense for the argument registers
   *       and the return. In that case, this function should return a pair where the first
   *       component of the result will be the indicated special register.
   */
  virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
    if (wide_kind == kWide) {
      DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
      COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
                     (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
                     (kArg7 == kArg6 + 1), kargs_range_unexpected);
      COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
                     (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
                     (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
      COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
      return RegStorage::MakeRegPair(TargetReg(reg),
                                     TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
    } else {
      return TargetReg(reg);
    }
  }

  /**
   * @brief Portable way of getting a special register for storing a pointer.
   * @see TargetReg()
   */
  virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
    return TargetReg(reg);
  }

  // Get a reg storage corresponding to the wide & ref flags of the reg location.
  virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
    if (loc.ref) {
      return TargetReg(reg, kRef);
    } else {
      return TargetReg(reg, loc.wide ? kWide : kNotWide);
    }
  }

  virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
  virtual RegLocation GetReturnAlt() = 0;
  virtual RegLocation GetReturnWideAlt() = 0;
  virtual RegLocation LocCReturn() = 0;
  virtual RegLocation LocCReturnRef() = 0;
  virtual RegLocation LocCReturnDouble() = 0;
  virtual RegLocation LocCReturnFloat() = 0;
  virtual RegLocation LocCReturnWide() = 0;
  virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
  virtual void AdjustSpillMask() = 0;
  virtual void ClobberCallerSave() = 0;
  virtual void FreeCallTemps() = 0;
  virtual void LockCallTemps() = 0;
  virtual void CompilerInitializeRegAlloc() = 0;

  // Required for target - miscellaneous.
  virtual void AssembleLIR() = 0;
  virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
  virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                        ResourceMask* use_mask, ResourceMask* def_mask) = 0;
  virtual const char* GetTargetInstFmt(int opcode) = 0;
  virtual const char* GetTargetInstName(int opcode) = 0;
  virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;

  // Note: This may return kEncodeNone on architectures that do not expose a PC. The caller must
  // take care of this.
  virtual ResourceMask GetPCUseDefEncoding() const = 0;
  virtual uint64_t GetTargetInstFlags(int opcode) = 0;
  virtual size_t GetInsnSize(LIR* lir) = 0;
  virtual bool IsUnconditionalBranch(LIR* lir) = 0;

  // Get the register class for load/store of a field.
  virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;

  // Required for target - Dalvik-level generators.

    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
    virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) = 0;
    virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                               RegLocation rl_src2, bool is_div) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
                                     bool is_div) = 0;
    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param check_zero 'true' if an exception should be generated if the divisor is 0.
     */
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) = 0;
    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                     bool is_div) = 0;
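
    /*
     * For reference, both GenDivRem() and GenDivRemLit() follow Java's truncating division
     * semantics. For example, with lit == 4:
     *   div: rl_dest = rl_src1 / 4                  (rounded toward zero)
     *   rem: rl_dest = rl_src1 - (rl_src1 / 4) * 4
     * Power-of-two literals like this are the usual strength-reduction candidates
     * (see SmallLiteralDivRem() above).
     */
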
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;

    /**
     * @brief Used for generating code that throws an ArithmeticException if a wide value is zero.
     * @details This is used for generating DivideByZero checks when the divisor is held in two
     * separate registers; the value is zero exactly when both 32-bit halves are zero.
     * @param reg The register (pair) holding the pair of 32-bit values.
     */
    virtual void GenDivZeroCheckWide(RegStorage reg) = 0;

    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

    /*
     * @brief Handle machine-specific extended MIR opcodes.
     * @param bb The basic block that contains the MIR.
     * @param mir The MIR whose opcode is a machine-specific (non-standard) extended opcode.
     * @note The base class implementation will abort for unknown opcodes.
     */
    virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);

    /**
     * @brief Lowers the kMirOpSelect MIR into LIR.
     * @param bb The basic block that contains the MIR.
     * @param mir The MIR whose opcode is kMirOpSelect.
     */
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Generates code to select one of the given constants depending on the given
     * condition code.
     * @note Will neither call EvalLoc nor StoreValue for rl_dest.
     */
    virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                  int dest_reg_class) = 0;
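
    /*
     * Usage sketch (illustrative): materializing (left == right) ? 1 : 0 without a branch,
     * with kCoreReg as the destination register class:
     *
     *   GenSelectConst32(rs_left, rs_right, kCondEq, 1, 0, rs_dest, kCoreReg);
     */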

    /**
     * @brief Used to generate a memory barrier in an architecture specific way.
     * @details The most recently generated LIR is considered for reuse as the barrier: if it
     * can be updated so that it also provides the barrier semantics, it is used as such.
     * Otherwise, a new LIR providing those semantics is generated.
     * @param barrier_kind The kind of memory barrier to generate.
     * @return Whether a new instruction was generated.
     */
    virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;
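
    /*
     * Usage sketch (kAnyAny is an assumed MemBarrierKind value):
     *
     *   bool new_insn = GenMemBarrier(kAnyAny);
     *   // new_insn is false when the previous LIR was updated in place to carry the
     *   // barrier semantics, as described above.
     */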

    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale,
                             bool card_mark) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift) = 0;

    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
    virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual void OpEndIT(LIR* it) = 0;
    virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
    virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
    virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
    virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
    virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

    /**
     * @brief Used to generate an LIR that does a load from mem to reg.
     * @param r_dest The destination physical register.
     * @param r_base The base physical register for the memory operand.
     * @param offset The displacement for the memory operand.
     * @param move_type Specification on the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                             MoveType move_type) = 0;

    /**
     * @brief Used to generate an LIR that does a store from reg to mem.
     * @param r_base The base physical register for the memory operand.
     * @param offset The displacement for the memory operand.
     * @param r_src The source physical register.
     * @param move_type Specification on the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                             MoveType move_type) = 0;

    /**
     * @brief Used for generating a conditional register to register operation.
     * @param op The opcode kind.
     * @param cc The condition code that, when true, will perform the opcode.
     * @param r_dest The destination physical register.
     * @param r_src The source physical register.
     * @return Returns the newly created LIR or null in case of creation failure.
     */
    virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;

    virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
    virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) = 0;
    virtual LIR* OpTestSuspend(LIR* target) = 0;
    virtual LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) = 0;
    virtual LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) = 0;
    virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
    virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
    virtual void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                       int offset) = 0;
    virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
    virtual void OpTlsCmp(ThreadOffset<4> offset, int val) = 0;
    virtual void OpTlsCmp(ThreadOffset<8> offset, int val) = 0;
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
    virtual bool InexpensiveConstantLong(int64_t value) = 0;
    virtual bool InexpensiveConstantDouble(int64_t value) = 0;

    // May be optimized by targets.
    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

    // Temp workaround
    void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);

  protected:
    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

    CompilationUnit* GetCompilationUnit() {
      return cu_;
    }

    /*
     * @brief Returns the index of the lowest set bit in 'x'.
     * @param x Value to be examined.
     * @returns The bit number of the lowest bit set in the value.
     */
    int32_t LowestSetBit(uint64_t x);

    /*
     * @brief Is this value a power of two?
     * @param x Value to be examined.
     * @returns 'true' if exactly one bit is set in the value.
     */
    bool IsPowerOfTwo(uint64_t x);
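
    /*
     * For reference: LowestSetBit(0x18) == 3, and IsPowerOfTwo(x) is equivalent to
     * (x != 0) && ((x & (x - 1)) == 0). These helpers back the power-of-two strength
     * reduction in the literal div/rem generators above.
     */
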
    /*
     * @brief Do these SRs (wide virtual register pairs) overlap?
     * @param rl_op1 One RegLocation
     * @param rl_op2 The other RegLocation
     * @return 'true' if the VR pairs overlap
     *
     * Check to see if a result pair has a misaligned overlap with an operand pair. This
     * is not usual for dx to generate, but it is legal (for now). In a future rev of
     * dex, we'll want to make this case illegal.
     */
    bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);

    /*
     * @brief Force a location (in a register) into a temporary register
     * @param loc location of result
     * @returns updated location
     */
    virtual RegLocation ForceTemp(RegLocation loc);

    /*
     * @brief Force a wide location (in registers) into temporary registers
     * @param loc location of result
     * @returns updated location
     */
    virtual RegLocation ForceTempWide(RegLocation loc);

    static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
      return wide ? k64 : ref ? kReference : k32;
    }

    virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                    RegLocation rl_dest, RegLocation rl_src);

    void AddSlowPath(LIRSlowPath* slowpath);

    virtual void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                            bool type_known_abstract, bool use_declaring_class,
                                            bool can_assume_type_is_in_dex_cache,
                                            uint32_t type_idx, RegLocation rl_dest,
                                            RegLocation rl_src);

    /*
     * @brief Generate the debug_frame FDE information if possible.
     * @returns pointer to vector containing the call frame information (CFI), or NULL.
     */
    virtual std::vector<uint8_t>* ReturnCallFrameInformation();

    /**
     * @brief Used to insert a marker that can be used to associate MIR with LIR.
     * @details Only inserts the marker if verbosity is enabled.
     * @param mir The mir that is currently being generated.
     */
    void GenPrintLabel(MIR* mir);

    /**
     * @brief Used to generate the return sequence when there is no frame.
     * @details Assumes that the return registers have already been populated.
     */
    virtual void GenSpecialExitSequence() = 0;

    /**
     * @brief Used to generate code for special methods that are known to be
     * small enough to work in frameless mode.
     * @param bb The basic block of the first MIR.
     * @param mir The first MIR of the special method.
     * @param special Information about the special method.
     * @return Returns whether or not this was handled successfully. Returns false
     * if the caller should punt to normal MIR-to-LIR conversion.
     */
    virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

  protected:
    void ClobberBody(RegisterInfo* p);
    void SetCurrentDexPc(DexOffset dexpc) {
      current_dalvik_offset_ = dexpc;
    }

    /**
     * @brief Used to lock the register for the argument at in_position, if that argument
     * was passed in a register.
     * @details Does nothing if the argument is passed via the stack.
     * @param in_position The argument number whose register to lock.
     * @param wide Whether the argument is wide.
     */
    void LockArg(int in_position, bool wide = false);

    /**
     * @brief Used to load a VR argument to a physical register.
     * @details The load is only done if the argument is not already in a physical register.
     * LockArg must have been previously called.
     * @param in_position The argument number to load.
     * @param reg_class The register class into which the argument should be loaded.
     * @param wide Whether the argument is 64-bit or not.
     * @return Returns the register (or register pair) for the loaded argument.
     */
    RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);

    /**
     * @brief Used to load a VR argument directly into a specified register location.
     * @param in_position The argument number to place in the register.
     * @param rl_dest The register location where the argument should be placed.
     */
    void LoadArgDirect(int in_position, RegLocation rl_dest);
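
    /*
     * Usage sketch (illustrative): LockArg() must precede LoadArg() for the same
     * position, per the contract documented above:
     *
     *   LockArg(0);                             // Pin the register, if arg 0 came in one.
     *   RegStorage reg = LoadArg(0, kCoreReg);  // Load only if not already in a register.
     */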

    /**
     * @brief Used to generate LIR for a special getter method.
     * @param mir The mir that represents the iget.
     * @param special Information about the special getter method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIGet(MIR* mir, const InlineMethod& special);

    /**
     * @brief Used to generate LIR for a special setter method.
     * @param mir The mir that represents the iput.
     * @param special Information about the special setter method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

    /**
     * @brief Used to generate LIR for a special return-args method.
     * @param mir The mir that represents the return of an argument.
     * @param special Information about the special return-args method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

    void AddDivZeroCheckSlowPath(LIR* branch);

    // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
    // kArg2 as a temp.
    virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

    /**
     * @brief Load a constant into a RegLocation.
     * @param rl_dest Destination RegLocation
     * @param value Constant value
     */
    virtual void GenConst(RegLocation rl_dest, int value);

    /**
     * Returns true iff wide GPRs are just different views on the same physical register.
     */
    virtual bool WideGPRsAreAliases() = 0;

    /**
     * Returns true iff wide FPRs are just different views on the same physical register.
     */
    virtual bool WideFPRsAreAliases() = 0;

    enum class WidenessCheck {  // private
      kIgnoreWide,
      kCheckWide,
      kCheckNotWide
    };

    enum class RefCheck {  // private
      kIgnoreRef,
      kCheckRef,
      kCheckNotRef
    };

    enum class FPCheck {  // private
      kIgnoreFP,
      kCheckFP,
      kCheckNotFP
    };

    /**
     * Check whether a reg storage seems well-formed, that is, if the reg storage is valid,
     * check that it has the expected form for the given flags. The kIgnore* values skip the
     * corresponding check; the kCheck* values require the property to hold; the kCheckNot*
     * values require it not to hold.
     */
    void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
                             bool fail, bool report) const;

    /**
     * Check whether a reg location seems well-formed, that is, if a reg storage is encoded,
     * check that it has the expected size.
     */
    void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;

    // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
    // kReportSizeError.
    void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
    // See CheckRegLocationImpl.
    void CheckRegLocation(RegLocation rl) const;
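
    /*
     * Usage sketch (illustrative): verifying that rs holds a wide, non-reference value
     * while ignoring whether it is an FP register:
     *
     *   CheckRegStorage(rs, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef,
     *                   FPCheck::kIgnoreFP);
     */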

  public:
    // TODO: add accessors for these.
    LIR* literal_list_;         // Constants.
    LIR* method_literal_list_;  // Method literals requiring patching.
    LIR* class_literal_list_;   // Class literals requiring patching.
    LIR* code_literal_list_;    // Code literals requiring patching.
    LIR* first_fixup_;          // Doubly-linked list of LIR nodes requiring fixups.

  protected:
    CompilationUnit* const cu_;
    MIRGraph* const mir_graph_;
    GrowableArray<SwitchTable*> switch_tables_;
    GrowableArray<FillArrayData*> fill_array_data_;
    GrowableArray<RegisterInfo*> tempreg_info_;
    GrowableArray<RegisterInfo*> reginfo_map_;
    GrowableArray<void*> pointer_storage_;
    CodeOffset current_code_offset_;  // Working byte offset of machine instructions.
    CodeOffset data_offset_;          // Starting offset of the literal pool.
    size_t total_size_;               // Header + code size.
    LIR* block_label_list_;
    PromotionMap* promotion_map_;
    /*
     * TODO: The code generation utilities don't have a built-in
     * mechanism to propagate the original Dalvik opcode address to the
     * associated generated instructions. For the trace compiler, this wasn't
     * necessary because the interpreter handled all throws and debugging
     * requests. For now we'll handle this by placing the Dalvik offset
     * in the CompilationUnit struct before codegen for each instruction.
     * The low-level LIR creation utilities will pull it from here. Rework this.
     */
    DexOffset current_dalvik_offset_;
    size_t estimated_native_code_size_;  // Just an estimate; used to reserve code_buffer_ size.
    RegisterPool* reg_pool_;
    /*
     * Sanity checking for the register temp tracking. The same SSA
     * name should never be associated with more than one temp register
     * during the compilation of a single instruction.
     */
    int live_sreg_;
    CodeBuffer code_buffer_;
    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
    std::vector<uint8_t> encoded_mapping_table_;
    std::vector<uint32_t> core_vmap_table_;
    std::vector<uint32_t> fp_vmap_table_;
    std::vector<uint8_t> native_gc_map_;
    int num_core_spills_;
    int num_fp_spills_;
    int frame_size_;
    unsigned int core_spill_mask_;
    unsigned int fp_spill_mask_;
    LIR* first_lir_insn_;
    LIR* last_lir_insn_;

    GrowableArray<LIRSlowPath*> slow_paths_;

    // The memory reference type for new LIRs.
    // NOTE: Passing this as an explicit parameter to every function that directly or indirectly
    // invokes RawLIR() would clutter the code and reduce readability.
    ResourceMask::ResourceBit mem_ref_type_;

    // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
    // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
    // (i.e. 8 bytes on a 32-bit arch, 16 bytes on a 64-bit arch) and we use ResourceMaskCache
    // to deduplicate the masks.
    ResourceMaskCache mask_cache_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_