mir_to_lir.h revision 2eba1fa7e9e5f91e18ae3778d529520bd2c78d55
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/reg_location.h"
#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "dex/quick/resource_mask.h"
#include "driver/compiler_driver.h"
#include "instruction_set.h"
#include "leb128.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/growable_array.h"
#include "utils/stack_checks.h"

namespace art {

/*
 * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
 * add type safety (see runtime/offsets.h).
 */
typedef uint32_t DexOffset;          // Dex offset in code units.
typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
typedef uint32_t CodeOffset;         // Native code offset in bytes.

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
#define IS_BRANCH            (1ULL << kIsBranch)
#define IS_IT                (1ULL << kIsIT)
#define IS_MOVE              (1ULL << kIsMoveOp)
#define IS_LOAD              (1ULL << kMemLoad)
#define IS_QUAD_OP           (1ULL << kIsQuadOp)
#define IS_QUIN_OP           (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
#define IS_STORE             (1ULL << kMemStore)
#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
#define IS_VOLATILE          (1ULL << kMemVolatile)
#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
#define NO_OPERAND           (1ULL << kNoOperand)
#define REG_DEF0             (1ULL << kRegDef0)
#define REG_DEF1             (1ULL << kRegDef1)
#define REG_DEF2             (1ULL << kRegDef2)
#define REG_DEFA             (1ULL << kRegDefA)
#define REG_DEFD             (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0        (1ULL << kRegDefList0)
#define REG_DEF_LIST1        (1ULL << kRegDefList1)
#define REG_DEF_LR           (1ULL << kRegDefLR)
#define REG_DEF_SP           (1ULL << kRegDefSP)
#define REG_USE0             (1ULL << kRegUse0)
#define REG_USE1             (1ULL << kRegUse1)
#define REG_USE2             (1ULL << kRegUse2)
#define REG_USE3             (1ULL << kRegUse3)
#define REG_USE4             (1ULL << kRegUse4)
#define REG_USEA             (1ULL << kRegUseA)
#define REG_USEC             (1ULL << kRegUseC)
#define REG_USED             (1ULL << kRegUseD)
#define REG_USEB             (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0        (1ULL << kRegUseList0)
#define REG_USE_LIST1        (1ULL << kRegUseList1)
#define REG_USE_LR           (1ULL << kRegUseLR)
#define REG_USE_PC           (1ULL << kRegUsePC)
#define REG_USE_SP           (1ULL << kRegUseSP)
#define SETS_CCODES          (1ULL << kSetsCCodes)
#define USES_CCODES          (1ULL << kUsesCCodes)
#define USE_FP_STACK         (1ULL << kUseFpStack)
#define REG_USE_LO           (1ULL << kUseLo)
#define REG_USE_HI           (1ULL << kUseHi)
#define REG_DEF_LO           (1ULL << kDefLo)
#define REG_DEF_HI           (1ULL << kDefHi)
#define SCALED_OFFSET_X0     (1ULL << kMemScaledx0)
#define SCALED_OFFSET_X2     (1ULL << kMemScaledx2)
#define SCALED_OFFSET_X4     (1ULL << kMemScaledx4)

// Special load/stores
#define IS_LOADX             (IS_LOAD | IS_VOLATILE)
#define IS_LOAD_OFF          (IS_LOAD | SCALED_OFFSET_X0)
#define IS_LOAD_OFF2         (IS_LOAD | SCALED_OFFSET_X2)
#define IS_LOAD_OFF4         (IS_LOAD | SCALED_OFFSET_X4)

#define IS_STOREX            (IS_STORE | IS_VOLATILE)
#define IS_STORE_OFF         (IS_STORE | SCALED_OFFSET_X0)
#define IS_STORE_OFF2        (IS_STORE | SCALED_OFFSET_X2)
#define IS_STORE_OFF4        (IS_STORE | SCALED_OFFSET_X4)

// Common combo register usage patterns.
#define REG_DEF01            (REG_DEF0 | REG_DEF1)
#define REG_DEF012           (REG_DEF0 | REG_DEF1 | REG_DEF2)
#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123      (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
#define REG_USE012           (REG_USE01 | REG_USE2)
#define REG_USE014           (REG_USE01 | REG_USE4)
#define REG_USE01            (REG_USE0 | REG_USE1)
#define REG_USE02            (REG_USE0 | REG_USE2)
#define REG_USE12            (REG_USE1 | REG_USE2)
#define REG_USE23            (REG_USE2 | REG_USE3)
#define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)

// TODO: #includes need a cleanup
#ifndef INVALID_SREG
#define INVALID_SREG (-1)
#endif

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
struct MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef std::vector<uint8_t> CodeBuffer;

struct UseDefMasks {
  const ResourceMask* use_mask;        // Resource mask for use.
  const ResourceMask* def_mask;        // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;                     // Chain of LIR nodes needing pc relative fixups.
};

struct LIR {
  CodeOffset offset;                   // Offset of this instruction.
  NarrowDexOffset dalvik_offset;       // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;        // For Dalvik register disambiguation.
    bool is_nop:1;                     // LIR is optimized away.
    unsigned int size:4;               // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;            // If true, masks should not be used.
    unsigned int generation:1;         // Used to track visitation state during fixup pass.
    unsigned int fixup:8;              // Fixup kind.
  } flags;
  union {
    UseDefMasks m;                     // Use & Def masks used during optimization.
    AssemblyInfo a;                    // Instruction info used during assembly phase.
  } u;
  int32_t operands[5];                 // [0..4] = [dest, src1, src2, extra, extra2].
};

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                            ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))

#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
  do { \
    low_reg = both_regs & 0xff; \
    high_reg = (both_regs >> 8) & 0xff; \
  } while (false)

// Mask to denote sreg as the start of a 64-bit item.  Must not interfere with low 16 bits.
#define STARTING_WIDE_SREG 0x10000

// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))

class Mir2Lir : public Backend {
 public:
  static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
  static constexpr bool kReportSizeError = true && kIsDebugBuild;

  /*
   * Auxiliary information describing the location of data embedded in the Dalvik
   * byte code stream.
   */
  struct EmbeddedData {
    CodeOffset offset;       // Code offset of data block.
    const uint16_t* table;   // Original dex data.
    DexOffset vaddr;         // Dalvik offset of parent opcode.
  };

  struct FillArrayData : EmbeddedData {
    int32_t size;
  };

  struct SwitchTable : EmbeddedData {
    LIR* anchor;             // Reference instruction for relative offsets.
    LIR** targets;           // Array of case targets.
  };

  /* Static register use counts */
  struct RefCounts {
    int count;
    int s_reg;
  };

  /*
   * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
   * and native register storage.  The primary purpose is to reuse previously
   * loaded values, if possible, and otherwise to keep the value in register
   * storage as long as possible.
   *
   * NOTE 1: wide_value refers to the width of the Dalvik value contained in
   * this register (or pair).  For example, a 64-bit register containing a 32-bit
   * Dalvik value would have wide_value==false even though the storage container itself
   * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
   * would have wide_value==true (and additionally would have its partner field set to the
   * other half whose wide_value field would also be true).
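   *
   * A minimal sketch of the pair bookkeeping described above (illustrative only: rs_r2 and
   * rs_r3 are ARM register names used purely as an example, and real code normally goes
   * through MarkWide() rather than setting these fields directly):
   *
   *   RegisterInfo* lo = GetRegInfo(rs_r2);  // register holding the low half
   *   RegisterInfo* hi = GetRegInfo(rs_r3);  // register holding the high half
   *   lo->SetIsWide(true);  lo->SetPartner(rs_r3);
   *   hi->SetIsWide(true);  hi->SetPartner(rs_r2);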
   *
   * NOTE 2: In the case of a register pair, you can determine which of the partners
   * is the low half by looking at the s_reg names.  The high s_reg will equal the low s_reg + 1.
   *
   * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
   * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
   * value, and the s_reg of the high word is implied (s_reg + 1).
   *
   * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false no
   * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
   * If is_temp==true and live==false, no other fields have
   * meaning.  If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
   * and def_end describe the relationship between the temp register/register pair and
   * the Dalvik value[s] described by s_reg/s_reg+1.
   *
   * The fields used_storage, master_storage and storage_mask are used to track allocation
   * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
   * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
   * storage use.  For s4, it would be 0x00000001; for s5, 0x00000002.  These values should not
   * change once initialized.  The "used_storage" field tracks current allocation status.
   * Although each record contains this field, only the field from the largest member of
   * an aliased group is used.  In our case, it would be d2's.  The master_storage pointer
   * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in a used_storage
   * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
   * Then, if we wanted to determine whether s4 could be allocated, we would "and"
   * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free;
   * to allocate it: *master_storage |= storage_mask.  To free: *master_storage &= ~storage_mask.
   *
   * For an X86 vector register example, storage_mask would be:
   *    0x00000001 for 32-bit view of xmm1
   *    0x00000003 for 64-bit view of xmm1
   *    0x0000000f for 128-bit view of xmm1
   *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
   *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
   *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
   *
   * The "liveness" of a register is handled in a similar way.  The liveness_ storage is
   * held in the widest member of an aliased set.  Note, though, that for a temp register to
   * be reused as live, it must both be marked live and the associated SReg() must match the
   * desired s_reg.  This gets a little complicated when dealing with aliased registers.  All
   * members of an aliased set will share the same liveness flags, but each will individually
   * maintain s_reg_.  In this way we can know that at least one member of an
   * aliased set is live, but will only fully match on the appropriate alias view.  For example,
   * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9
   * because it is wide), its aliases s2 and s3 will show as live, but will have
   * s_reg_ == INVALID_SREG.  An attempt to later AllocLiveReg() of v9 with a single-precision
   * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
   * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
   * report that v9 is currently not live as a single (which is what we want).
   *
   * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
   * to treat xmm registers:
   *     1. Treat them all as 128-bits wide, but denote how much data used via bytes field.
   *         o This more closely matches reality, but means you'd need to be able to get
   *           to the associated RegisterInfo struct to figure out how it's being used.
   *         o This is how 64-bit core registers will be used - always 64 bits, but the
   *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
   *     2. View the xmm registers based on contents.
   *         o A single in a xmm2 register would be k32BitVector, while a double in xmm2 would
   *           be a k64BitVector.
   *         o Note that the two uses above would be considered distinct registers (but with
   *           the aliasing mechanism, we could detect interference).
   *         o This is how aliased double and single float registers will be handled on
   *           Arm and MIPS.
   * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
   * mechanism 2 for aliased float registers and x86 vector registers.
   */
  class RegisterInfo {
   public:
    RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
    ~RegisterInfo() {}
    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kArenaAllocRegAlloc);
    }

    static const uint32_t k32SoloStorageMask     = 0x00000001;
    static const uint32_t kLowSingleStorageMask  = 0x00000001;
    static const uint32_t kHighSingleStorageMask = 0x00000002;
    static const uint32_t k64SoloStorageMask     = 0x00000003;
    static const uint32_t k128SoloStorageMask    = 0x0000000f;
    static const uint32_t k256SoloStorageMask    = 0x000000ff;
    static const uint32_t k512SoloStorageMask    = 0x0000ffff;
    static const uint32_t k1024SoloStorageMask   = 0xffffffff;

    bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
    void MarkInUse() { master_->used_storage_ |= storage_mask_; }
    void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
    // No part of the containing storage is live in this view.
    bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
    // Liveness of this view matches.  Note: not equivalent to !IsDead().
    bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
    void MarkLive(int s_reg) {
      // TODO: Anything useful to assert here?
      s_reg_ = s_reg;
      master_->liveness_ |= storage_mask_;
    }
    void MarkDead() {
      if (SReg() != INVALID_SREG) {
        s_reg_ = INVALID_SREG;
        master_->liveness_ &= ~storage_mask_;
        ResetDefBody();
      }
    }
    RegStorage GetReg() { return reg_; }
    void SetReg(RegStorage reg) { reg_ = reg; }
    bool IsTemp() { return is_temp_; }
    void SetIsTemp(bool val) { is_temp_ = val; }
    bool IsWide() { return wide_value_; }
    void SetIsWide(bool val) {
      wide_value_ = val;
      if (!val) {
        // If not wide, reset partner to self.
        SetPartner(GetReg());
      }
    }
    bool IsDirty() { return dirty_; }
    void SetIsDirty(bool val) { dirty_ = val; }
    RegStorage Partner() { return partner_; }
    void SetPartner(RegStorage partner) { partner_ = partner; }
    int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
    const ResourceMask& DefUseMask() { return def_use_mask_; }
    void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
    RegisterInfo* Master() { return master_; }
    void SetMaster(RegisterInfo* master) {
      master_ = master;
      if (master != this) {
        master_->aliased_ = true;
        DCHECK(alias_chain_ == nullptr);
        alias_chain_ = master_->alias_chain_;
        master_->alias_chain_ = this;
      }
    }
    bool IsAliased() { return aliased_; }
    RegisterInfo* GetAliasChain() { return alias_chain_; }
    uint32_t StorageMask() { return storage_mask_; }
    void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
    LIR* DefStart() { return def_start_; }
    void SetDefStart(LIR* def_start) { def_start_ = def_start; }
    LIR* DefEnd() { return def_end_; }
    void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
    void ResetDefBody() { def_start_ = def_end_ = nullptr; }
    // Find member of aliased set matching storage_used; return nullptr if none.
    RegisterInfo* FindMatchingView(uint32_t storage_used) {
      RegisterInfo* res = Master();
      for (; res != nullptr; res = res->GetAliasChain()) {
        if (res->StorageMask() == storage_used)
          break;
      }
      return res;
    }

   private:
    RegStorage reg_;
    bool is_temp_;               // Can allocate as temp?
    bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
    bool dirty_;                 // If live, is it dirty?
    bool aliased_;               // Is this the master for other aliased RegisterInfo's?
    RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
    int s_reg_;                  // Name of live value.
    ResourceMask def_use_mask_;  // Resources for this element.
    uint32_t used_storage_;      // 1 bit per 4 bytes of storage.  Unused by aliases.
    uint32_t liveness_;          // 1 bit per 4 bytes of storage.  Unused by aliases.
    RegisterInfo* master_;       // Pointer to controlling storage mask.
    uint32_t storage_mask_;      // Track allocation of sub-units.
    LIR* def_start_;             // Starting inst in last def sequence.
    LIR* def_end_;               // Ending inst in last def sequence.
    RegisterInfo* alias_chain_;  // Chain of aliased registers.
  };

  class RegisterPool {
   public:
    RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
                 const ArrayRef<const RegStorage>& core_regs,
                 const ArrayRef<const RegStorage>& core64_regs,
                 const ArrayRef<const RegStorage>& sp_regs,
                 const ArrayRef<const RegStorage>& dp_regs,
                 const ArrayRef<const RegStorage>& reserved_regs,
                 const ArrayRef<const RegStorage>& reserved64_regs,
                 const ArrayRef<const RegStorage>& core_temps,
                 const ArrayRef<const RegStorage>& core64_temps,
                 const ArrayRef<const RegStorage>& sp_temps,
                 const ArrayRef<const RegStorage>& dp_temps);
    ~RegisterPool() {}
    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kArenaAllocRegAlloc);
    }
    void ResetNextTemp() {
      next_core_reg_ = 0;
      next_sp_reg_ = 0;
      next_dp_reg_ = 0;
    }
    GrowableArray<RegisterInfo*> core_regs_;
    int next_core_reg_;
    GrowableArray<RegisterInfo*> core64_regs_;
    int next_core64_reg_;
    GrowableArray<RegisterInfo*> sp_regs_;    // Single precision float.
    int next_sp_reg_;
    GrowableArray<RegisterInfo*> dp_regs_;    // Double precision float.
    int next_dp_reg_;
    GrowableArray<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_.
    int* next_ref_reg_;

   private:
    Mir2Lir* const m2l_;
  };

  struct PromotionMap {
    RegLocationType core_location:3;
    uint8_t core_reg;
    RegLocationType fp_location:3;
    uint8_t fp_reg;
    bool first_in_pair;
  };

  //
  // Slow paths.  This object is used to generate a sequence of code that is executed in the
  // slow path.  For example, resolving a string or class is slow as it will only be executed
  // once (after that it is resolved and doesn't need to be done again).  We want slow paths
  // to be placed out-of-line, and not require a (mispredicted, probably) conditional forward
  // branch over them.
  //
  // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
  // the Compile() function that will be called near the end of the code generated by the
  // method.
  //
  // The basic flow for a slow path is:
  //
  //     CMP reg, #value
  //     BEQ fromfast
  //   cont:
  //     ...
  //     fast path code
  //     ...
  //     more code
  //     ...
  //     RETURN
  //
  //   fromfast:
  //     ...
  //     slow path code
  //     ...
  //     B cont
  //
  // So you see we need two labels and two branches.  The first branch (BEQ fromfast) is
  // the conditional branch to the slow path code.  The second label (cont) is used
  // as an unconditional branch target for getting back to the code after the slow path
  // has completed.
  //

  class LIRSlowPath {
   public:
    LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                LIR* cont = nullptr) :
        m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
      m2l->StartSlowPath(this);
    }
    virtual ~LIRSlowPath() {}
    virtual void Compile() = 0;

    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kArenaAllocData);
    }

    LIR* GetContinuationLabel() {
      return cont_;
    }

    LIR* GetFromFast() {
      return fromfast_;
    }

   protected:
    LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);

    Mir2Lir* const m2l_;
    CompilationUnit* const cu_;
    const DexOffset current_dex_pc_;
    LIR* const fromfast_;
    LIR* const cont_;
  };

  // Helper class for changing mem_ref_type_ until the end of current scope.  See mem_ref_type_.
  class ScopedMemRefType {
   public:
    ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
        : m2l_(m2l),
          old_mem_ref_type_(m2l->mem_ref_type_) {
      m2l_->mem_ref_type_ = new_mem_ref_type;
    }

    ~ScopedMemRefType() {
      m2l_->mem_ref_type_ = old_mem_ref_type_;
    }

   private:
    Mir2Lir* const m2l_;
    ResourceMask::ResourceBit old_mem_ref_type_;

    DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
  };

  virtual ~Mir2Lir() {}

  /**
   * @brief Decodes the LIR offset.
   * @return Returns the scaled offset of LIR.
   */
  virtual size_t GetInstructionOffset(LIR* lir);

  int32_t s4FromSwitchData(const void* switch_data) {
    return *reinterpret_cast<const int32_t*>(switch_data);
  }

  /*
   * TODO: this is a trace JIT vestige, and its use should be reconsidered.  At the time
   * it was introduced, it was intended to be a quick best guess of type without having to
   * take the time to do type analysis.  Currently, though, we have a much better idea of
   * the types of Dalvik virtual registers.  Instead of using this for a best guess, why not
   * just use our knowledge of type to select the most appropriate register class?
   */
  RegisterClass RegClassBySize(OpSize size) {
    if (size == kReference) {
      return kRefReg;
    } else {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }
  }

  size_t CodeBufferSizeInBytes() {
    return code_buffer_.size() / sizeof(code_buffer_[0]);
  }

  static bool IsPseudoLirOp(int opcode) {
    return (opcode < 0);
  }

  /*
   * LIR operands are 32-bit integers.  Sometimes (especially for managing
   * instructions which require PC-relative fixups), we need the operands to carry
   * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
   * hold that index in the operand array.
   * TUNING: If use of these utilities becomes more common on 32-bit builds, it
   * may be worth conditionally-compiling a set of identity functions here.
   */
  uint32_t WrapPointer(void* pointer) {
    uint32_t res = pointer_storage_.Size();
    pointer_storage_.Insert(pointer);
    return res;
  }

  void* UnwrapPointer(size_t index) {
    return pointer_storage_.Get(index);
  }

  // strdup(), but allocates from the arena.
  char* ArenaStrdup(const char* str) {
    size_t len = strlen(str) + 1;
    char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
    if (res != NULL) {
      strncpy(res, str, len);
    }
    return res;
  }

  // Shared by all targets - implemented in codegen_util.cc
  void AppendLIR(LIR* lir);
  void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
  void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

  /**
   * @brief Provides the maximum number of compiler temporaries that the backend can/wants
   * to place in a frame.
   * @return Returns the maximum number of compiler temporaries.
   */
  size_t GetMaxPossibleCompilerTemps() const;

  /**
   * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
   * @return Returns the size in bytes for space needed for compiler temporary spill region.
   */
  size_t GetNumBytesForCompilerTempSpillRegion();

  DexOffset GetCurrentDexPc() const {
    return current_dalvik_offset_;
  }

  RegisterClass ShortyToRegClass(char shorty_type);
  RegisterClass LocToRegClass(RegLocation loc);
  int ComputeFrameSize();
  virtual void Materialize();
  virtual CompiledMethod* GetCompiledMethod();
  void MarkSafepointPC(LIR* inst);
  void MarkSafepointPCAfter(LIR* after);
  void SetupResourceMasks(LIR* lir);
  void SetMemRefType(LIR* lir, bool is_load, int mem_type);
  void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
  void SetupRegMask(ResourceMask* mask, int reg);
  void ClearRegMask(ResourceMask* mask, int reg);
  void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
  void EliminateLoad(LIR* lir, int reg_id);
  void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
  void DumpPromotionMap();
  void CodegenDump();
  LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
              int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
  LIR* NewLIR0(int opcode);
  LIR* NewLIR1(int opcode, int dest);
  LIR* NewLIR2(int opcode, int dest, int src1);
  LIR* NewLIR2NoDest(int opcode, int src, int info);
  LIR* NewLIR3(int opcode, int dest, int src1, int src2);
  LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
  LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
  LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
  LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
  LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
  LIR* AddWordData(LIR* *constant_list_p, int value);
  LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
  void ProcessSwitchTables();
  void DumpSparseSwitchTable(const uint16_t* table);
  void DumpPackedSwitchTable(const uint16_t* table);
  void MarkBoundary(DexOffset offset, const char* inst_str);
  void NopLIR(LIR* lir);
  void UnlinkLIR(LIR* lir);
  bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
  bool IsInexpensiveConstant(RegLocation rl_src);
  ConditionCode FlipComparisonOrder(ConditionCode before);
  ConditionCode NegateComparison(ConditionCode before);
  virtual void InstallLiteralPools();
  void InstallSwitchTables();
  void InstallFillArrayData();
  bool VerifyCatchEntries();
  void CreateMappingTables();
  void CreateNativeGcMap();
  int AssignLiteralOffset(CodeOffset offset);
  int AssignSwitchTablesOffset(CodeOffset offset);
  int AssignFillArrayDataOffset(CodeOffset offset);
  virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
  void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
  void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);

  virtual void StartSlowPath(LIRSlowPath* slowpath) {}
  virtual void BeginInvoke(CallInfo* info) {}
  virtual void EndInvoke(CallInfo* info) {}


  // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
  virtual RegLocation NarrowRegLoc(RegLocation loc);

  // Shared by all targets - implemented in local_optimizations.cc
  void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
  void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
  void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
  virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);

  // Shared by all targets - implemented in ralloc_util.cc
  int GetSRegHi(int lowSreg);
  bool LiveOut(int s_reg);
  void SimpleRegAlloc();
  void ResetRegPool();
  void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
  void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
  void DumpCoreRegPool();
  void DumpFpRegPool();
  void DumpRegPools();
  /* Mark a temp register as dead.  Does not affect allocation state. */
  void Clobber(RegStorage reg);
  void ClobberSReg(int s_reg);
  void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
  int SRegToPMap(int s_reg);
  void RecordCorePromotion(RegStorage reg, int s_reg);
  RegStorage AllocPreservedCoreReg(int s_reg);
  void RecordFpPromotion(RegStorage reg, int s_reg);
  RegStorage AllocPreservedFpReg(int s_reg);
  virtual RegStorage AllocPreservedSingle(int s_reg);
  virtual RegStorage AllocPreservedDouble(int s_reg);
  RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
  virtual RegStorage AllocTemp(bool required = true);
  virtual RegStorage AllocTempWide(bool required = true);
  virtual RegStorage AllocTempRef(bool required = true);
  virtual RegStorage AllocTempSingle(bool required = true);
  virtual RegStorage AllocTempDouble(bool required = true);
  virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
  virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
  void FlushReg(RegStorage reg);
  void FlushRegWide(RegStorage reg);
  RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
  RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
  virtual void FreeTemp(RegStorage reg);
  virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
  virtual bool IsLive(RegStorage reg);
  virtual bool IsTemp(RegStorage reg);
  bool IsPromoted(RegStorage reg);
  bool IsDirty(RegStorage reg);
  virtual void LockTemp(RegStorage reg);
  void ResetDef(RegStorage reg);
  void NullifyRange(RegStorage reg, int s_reg);
  void MarkDef(RegLocation rl, LIR* start, LIR* finish);
  void MarkDefWide(RegLocation rl, LIR* start, LIR* finish);
  void ResetDefLoc(RegLocation rl);
  void ResetDefLocWide(RegLocation rl);
  void ResetDefTracking();
  void ClobberAllTemps();
  void FlushSpecificReg(RegisterInfo* info);
  void FlushAllRegs();
  bool RegClassMatches(int reg_class, RegStorage reg);
  void MarkLive(RegLocation loc);
  void MarkTemp(RegStorage reg);
  void UnmarkTemp(RegStorage reg);
  void MarkWide(RegStorage reg);
  void MarkNarrow(RegStorage reg);
  void MarkClean(RegLocation loc);
  void MarkDirty(RegLocation loc);
  void MarkInUse(RegStorage reg);
  bool CheckCorePoolSanity();
  virtual RegLocation UpdateLoc(RegLocation loc);
  virtual RegLocation UpdateLocWide(RegLocation loc);
  RegLocation UpdateRawLoc(RegLocation loc);

  /**
   * @brief Used to prepare a register location to receive a wide value.
   * @see EvalLoc
   * @param loc the location where the value will be stored.
   * @param reg_class Type of register needed.
   * @param update Whether the liveness information should be updated.
   * @return Returns the properly typed temporary in physical register pairs.
   */
  virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);

  /**
   * @brief Used to prepare a register location to receive a value.
   * @param loc the location where the value will be stored.
   * @param reg_class Type of register needed.
   * @param update Whether the liveness information should be updated.
   * @return Returns the properly typed temporary in physical register.
   */
  virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);

  void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
  void DumpCounts(const RefCounts* arr, int size, const char* msg);
  void DoPromotion();
  int VRegOffset(int v_reg);
  int SRegOffset(int s_reg);
  RegLocation GetReturnWide(RegisterClass reg_class);
  RegLocation GetReturn(RegisterClass reg_class);
  RegisterInfo* GetRegInfo(RegStorage reg);

  // Shared by all targets - implemented in gen_common.cc.
  void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
  virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                RegLocation rl_src, RegLocation rl_dest, int lit);
  bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
  virtual void HandleSlowPaths();
  void GenBarrier();
  void GenDivZeroException();
  // c_code holds condition code that's generated from testing divisor against 0.
  void GenDivZeroCheck(ConditionCode c_code);
  // reg holds divisor.
  void GenDivZeroCheck(RegStorage reg);
  void GenArrayBoundsCheck(RegStorage index, RegStorage length);
  void GenArrayBoundsCheck(int32_t index, RegStorage length);
  LIR* GenNullCheck(RegStorage reg);
  void MarkPossibleNullPointerException(int opt_flags);
  void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
  void MarkPossibleStackOverflowException();
  void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
  LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
  LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
  virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
  void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                           RegLocation rl_src2, LIR* taken, LIR* fall_through);
  void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                               LIR* taken, LIR* fall_through);
  virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
  void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                       RegLocation rl_src);
  void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                   RegLocation rl_src);
  void GenFilledNewArray(CallInfo* info);
  void GenSput(MIR* mir, RegLocation rl_src,
               bool is_long_or_double, bool is_object);
  void GenSget(MIR* mir, RegLocation rl_dest,
               bool is_long_or_double, bool is_object);
  void GenIGet(MIR* mir, int opt_flags, OpSize size,
               RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
  void GenIPut(MIR* mir, int opt_flags, OpSize size,
               RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
  void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                      RegLocation rl_src);

  void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
  void GenConstString(uint32_t string_idx, RegLocation rl_dest);
  void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
  void GenThrow(RegLocation rl_src);
  void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
  void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
  void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                    RegLocation rl_src1, RegLocation rl_src2);
  virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift);
  void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src, int lit);
  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
  void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
  virtual void GenSuspendTest(int opt_flags);
  virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);

  // This will be overridden by x86 implementation.
  virtual void GenConstWide(RegLocation rl_dest, int64_t value);
  virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2);

  // Shared by all targets - implemented in gen_invoke.cc.
  LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                  bool use_link = true);
  RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);

  void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
  void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
  void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
  void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                    bool safepoint_pc);
  void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
                                       bool safepoint_pc);
  void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
                                       bool safepoint_pc);
  void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
  void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                  bool safepoint_pc);
  void CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
                                             RegLocation arg2, bool safepoint_pc);
  void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                               RegLocation arg1, bool safepoint_pc);
  void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                  RegStorage arg1, int arg2, bool safepoint_pc);
  void CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                             RegLocation arg2, bool safepoint_pc);
  void CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
                                     bool safepoint_pc);
  void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                  RegLocation arg1, RegLocation arg2,
                                                  bool safepoint_pc);
  void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                          RegLocation arg0, RegLocation arg1,
                                                          RegLocation arg2,
                                                          bool safepoint_pc);
  void GenInvoke(CallInfo* info);
  void GenInvokeNoInline(CallInfo* info);
  virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
  virtual int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                   NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx,
                                   uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                                   bool skip_this);
  virtual int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                 NextCallInsn next_call_insn,
                                 const MethodReference& target_method,
                                 uint32_t vtable_idx,
                                 uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                                 bool skip_this);

  /**
   * @brief Used to determine the register location of destination.
   * @details This is needed during generation of inline intrinsics because it finds the
   * destination of the return, either the physical register or the target of move-result.
   * @param info Information about the invoke.
   * @return Returns the destination location.
   */
  RegLocation InlineTarget(CallInfo* info);

  /**
   * @brief Used to determine the wide register location of destination.
   * @see InlineTarget
   * @param info Information about the invoke.
   * @return Returns the destination location.
   */
  RegLocation InlineTargetWide(CallInfo* info);

  bool GenInlinedGet(CallInfo* info);
  virtual bool GenInlinedCharAt(CallInfo* info);
  bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
  virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
  bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
  bool GenInlinedAbsInt(CallInfo* info);
  virtual bool GenInlinedAbsLong(CallInfo* info);
  virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
  virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
  bool GenInlinedFloatCvt(CallInfo* info);
  bool GenInlinedDoubleCvt(CallInfo* info);
  virtual bool GenInlinedCeil(CallInfo* info);
  virtual bool GenInlinedFloor(CallInfo* info);
  virtual bool GenInlinedRint(CallInfo* info);
  virtual bool GenInlinedRound(CallInfo* info, bool is_double);
  virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
  virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
  bool GenInlinedStringCompareTo(CallInfo* info);
  virtual bool GenInlinedCurrentThread(CallInfo* info);
  bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
  bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                           bool is_volatile, bool is_ordered);
  virtual int LoadArgRegs(CallInfo* info, int call_state,
                          NextCallInsn next_call_insn,
                          const MethodReference& target_method,
                          uint32_t vtable_idx,
                          uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                          bool skip_this);

  // Shared by all targets - implemented in gen_loadstore.cc.
  RegLocation LoadCurrMethod();
  void LoadCurrMethodDirect(RegStorage r_tgt);
  virtual LIR* LoadConstant(RegStorage r_dest, int value);
  // Natural word size.
  virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
    return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
  }
  // Load 32 bits, regardless of target.
  virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
    return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
  }
  // Load a reference at base + displacement and decompress into register.
  virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                           VolatileKind is_volatile) {
    return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
  }
  // Load a reference at base + index and decompress into register.
  virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                              int scale) {
    return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
  }
  // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
  virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
  // Same as above, but derive the target register class from the location record.
  virtual RegLocation LoadValue(RegLocation rl_src);
  // Load Dalvik value with 64-bit memory storage.
  virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
  // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
  virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
  virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 64-bit memory storage.
  virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 64-bit memory storage.
  virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
  // Store an item of natural word size.
  virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
    return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
  }
  // Store an uncompressed reference into a compressed 32-bit container.
  virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
                            VolatileKind is_volatile) {
    return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
  }
  // Store an uncompressed reference into a compressed 32-bit container by index.
  virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                               int scale) {
    return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
  }
  // Store 32 bits, regardless of target.
  virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
    return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
  }

  /**
   * @brief Used to do the final store in the destination as per bytecode semantics.
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location.  Can be either physical register or dalvik
   * register.
   */
  virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store in a wide destination as per bytecode semantics.
   * @see StoreValue
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location.  Can be either physical register or dalvik
   * register.
   */
  virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store to a destination as per bytecode semantics.
   * @see StoreValue
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location.  It must be kLocPhysReg.
   *
   * This is used for x86 two operand computations, where we have computed the correct
   * register value that now needs to be properly registered.  This is used to avoid an
   * extra register copy that would result if StoreValue was called.
   */
  virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store in a wide destination as per bytecode semantics.
   * @see StoreValueWide
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location.  It must be kLocPhysReg.
   *
   * This is used for x86 two operand computations, where we have computed the correct
   * register values that now need to be properly registered.  This is used to avoid an
   * extra pair of register copies that would result if StoreValueWide was called.
   */
  virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);

  // Shared by all targets - implemented in mir_to_lir.cc.
  void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
  virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
  bool MethodBlockCodeGen(BasicBlock* bb);
  bool SpecialMIR2LIR(const InlineMethod& special);
  virtual void MethodMIR2LIR();
  // Update LIR for verbose listings.
  void UpdateLIROffsets();

  /*
   * @brief Load the address of the dex method into the register.
   * @param target_method The MethodReference of the method to be invoked.
   * @param type How the method will be invoked.
   * @param symbolic_reg The register that will contain the code address.
   * @note register will be passed to TargetReg to get physical register.
   */
  void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                       SpecialTargetRegister symbolic_reg);

  /*
   * @brief Load the Method* of a dex method into the register.
   * @param target_method The MethodReference of the method to be invoked.
   * @param type How the method will be invoked.
   * @param symbolic_reg The register that will contain the method address.
   * @note register will be passed to TargetReg to get physical register.
   */
  virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                 SpecialTargetRegister symbolic_reg);

  /*
   * @brief Load the Class* of a Dex Class type into the register.
   * @param type_idx Type index of the class to be loaded.
   * @param symbolic_reg The register that will contain the Class* address.
   * @note register will be passed to TargetReg to get physical register.
   */
  virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);

  // Routines that work for the generic case, but may be overridden by target.
  /*
   * @brief Compare memory to immediate, and branch if condition true.
   * @param cond The condition code that when true will branch to the target.
   * @param temp_reg A temporary register that can be used if compare to memory is not
   * supported by the architecture.
   * @param base_reg The register holding the base address.
   * @param offset The offset from the base.
   * @param check_value The immediate to compare to.
   * @param target branch target (or nullptr)
   * @param compare output for getting LIR for comparison (or nullptr)
   * @returns The branch instruction that was generated.
   */
  virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                 int offset, int check_value, LIR* target, LIR** compare);

  // Required for target - codegen helpers.
  virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                  RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
  virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
  virtual LIR* CheckSuspendUsingLoad() = 0;

  virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;

  virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                            OpSize size, VolatileKind is_volatile) = 0;
  virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                               int scale, OpSize size) = 0;
  virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
  virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
  virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                             OpSize size, VolatileKind is_volatile) = 0;
  virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                int scale, OpSize size) = 0;
  virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;

  // Required for target - register utilities.

  bool IsSameReg(RegStorage reg1, RegStorage reg2) {
    RegisterInfo* info1 = GetRegInfo(reg1);
    RegisterInfo* info2 = GetRegInfo(reg2);
    return (info1->Master() == info2->Master() &&
            (info1->StorageMask() & info2->StorageMask()) != 0);
  }

  /**
   * @brief Portable way of getting special registers from the backend.
   * @param reg Enumeration describing the purpose of the register.
   * @return Return the #RegStorage corresponding to the given purpose @p reg.
   * @note This function is currently allowed to return any suitable view of the registers
   *       (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
   */
  virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;

  /**
   * @brief Portable way of getting special registers from the backend.
   * @param reg Enumeration describing the purpose of the register.
   * @param wide_kind What kind of view of the special register is required.
   * @return Return the #RegStorage corresponding to the given purpose @p reg.
   *
   * @note For 32b system, wide (kWide) views only make sense for the argument registers and the
   *       return.  In that case, this function should return a pair where the first component of
   *       the result will be the indicated special register.
   */
  virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
    if (wide_kind == kWide) {
      DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
      COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
                     (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
                     (kArg7 == kArg6 + 1), kargs_range_unexpected);
      COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
                     (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
                     (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
      COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
      return RegStorage::MakeRegPair(TargetReg(reg),
                                     TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
    } else {
      return TargetReg(reg);
    }
  }

  /**
   * @brief Portable way of getting a special register for storing a pointer.
   * @see TargetReg()
   */
  virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
    return TargetReg(reg);
  }

  // Get a reg storage corresponding to the wide & ref flags of the reg location.
  virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
    if (loc.ref) {
      return TargetReg(reg, kRef);
    } else {
      return TargetReg(reg, loc.wide ? kWide : kNotWide);
    }
  }

  virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
  virtual RegLocation GetReturnAlt() = 0;
  virtual RegLocation GetReturnWideAlt() = 0;
  virtual RegLocation LocCReturn() = 0;
  virtual RegLocation LocCReturnRef() = 0;
  virtual RegLocation LocCReturnDouble() = 0;
  virtual RegLocation LocCReturnFloat() = 0;
  virtual RegLocation LocCReturnWide() = 0;
  virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
  virtual void AdjustSpillMask() = 0;
  virtual void ClobberCallerSave() = 0;
  virtual void FreeCallTemps() = 0;
  virtual void LockCallTemps() = 0;
  virtual void CompilerInitializeRegAlloc() = 0;

  // Required for target - miscellaneous.
  virtual void AssembleLIR() = 0;
  virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
  virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                        ResourceMask* use_mask, ResourceMask* def_mask) = 0;
  virtual const char* GetTargetInstFmt(int opcode) = 0;
  virtual const char* GetTargetInstName(int opcode) = 0;
  virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;

  // Note: This may return kEncodeNone on architectures that do not expose a PC.  The caller must
  //       take care of this.
  virtual ResourceMask GetPCUseDefEncoding() const = 0;
  virtual uint64_t GetTargetInstFlags(int opcode) = 0;
  virtual size_t GetInsnSize(LIR* lir) = 0;
  virtual bool IsUnconditionalBranch(LIR* lir) = 0;

  // Get the register class for load/store of a field.
  virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;

  // Required for target - Dalvik-level generators.
  virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
  virtual void GenMulLong(Instruction::Code,
                          RegLocation rl_dest, RegLocation rl_src1,
                          RegLocation rl_src2) = 0;
  virtual void GenAddLong(Instruction::Code,
                          RegLocation rl_dest, RegLocation rl_src1,
                          RegLocation rl_src2) = 0;
  virtual void GenAndLong(Instruction::Code,
                          RegLocation rl_dest, RegLocation rl_src1,
                          RegLocation rl_src2) = 0;
  virtual void GenArithOpDouble(Instruction::Code opcode,
                                RegLocation rl_dest, RegLocation rl_src1,
                                RegLocation rl_src2) = 0;
  virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src1, RegLocation rl_src2) = 0;
  virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_src2) = 0;
  virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src) = 0;
  virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;

  /**
   * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
    /**
     * @brief Used to generate code for the intrinsic java\.lang\.Math methods min and max.
     * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
     * that applies to integers. The generated code will write the smallest or largest value
     * directly into the destination register as specified by the invoke information.
     * @param info Information about the invoke.
     * @param is_min If true, generates code that computes the minimum; otherwise the maximum.
     * @param is_long If true, the value is a long; otherwise it is an int.
     * @return Returns true if successfully generated.
     */
    virtual bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) = 0;
    virtual bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);

    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
    virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) = 0;
    virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                               RegLocation rl_src2, bool is_div) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
                                     bool is_div) = 0;
    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param check_zero 'true' if an exception should be generated if the divisor is 0.
     */
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) = 0;
    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                     bool is_div) = 0;
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;

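    // Example (hypothetical call site, for illustration only): a Dalvik div-int whose divisor
    // is not known to be non-zero would be lowered through the hooks above roughly as
    //
    //   RegLocation rl_result =
    //       GenDivRem(rl_dest, rl_src1, rl_src2, /* is_div= */ true, /* check_zero= */ true);
    //
    // whereas a constant divisor takes the GenDivRemLit() path, typically after
    // SmallLiteralDivRem() has had a chance to strength-reduce it.
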
    /**
     * @brief Used for generating code that throws an ArithmeticException if the wide divisor
     * is zero.
     * @details This is used for generating DivideByZero checks when the divisor is held in two
     * separate registers.
     * @param reg The register holding the pair of 32-bit values.
     */
    virtual void GenDivZeroCheckWide(RegStorage reg) = 0;

    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

    /*
     * @brief Handle machine-specific extended MIR opcodes.
     * @param bb The basic block the MIR is from.
     * @param mir The MIR whose opcode is a non-standard extended MIR opcode.
     * @note The base class implementation will abort for unknown opcodes.
     */
    virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);

    /**
     * @brief Lowers the kMirOpSelect MIR into LIR.
     * @param bb The basic block the MIR is from.
     * @param mir The MIR whose opcode is kMirOpSelect.
     */
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Generates code to select one of the given constants depending on the given opcode.
     */
    virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                  int dest_reg_class) = 0;

    /**
     * @brief Used to generate a memory barrier in an architecture-specific way.
     * @details The last generated LIR is considered for reuse as the barrier: if it can be
     * updated so that it also provides the barrier semantics, it is used as such. Otherwise,
     * a new LIR that provides the required semantics is generated.
     * @param barrier_kind The kind of memory barrier to generate.
     * @return whether a new instruction was generated.
     */
    virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;

    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale,
                             bool card_mark) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift) = 0;

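    // Example (hypothetical caller, for illustration only): the boolean result of
    // GenMemBarrier() above distinguishes reusing the previously emitted LIR as the barrier
    // from appending a brand new instruction:
    //
    //   if (GenMemBarrier(barrier_kind)) {
    //     // A separate fence instruction was emitted; account for it if counting LIRs.
    //   }
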
    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
    virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual void OpEndIT(LIR* it) = 0;
    virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
    virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
    virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
    virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

    /**
     * @brief Used to generate an LIR that loads from memory to a register.
     * @param r_dest The destination physical register.
     * @param r_base The base physical register for the memory operand.
     * @param offset The displacement for the memory operand.
     * @param move_type Specification of the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                             MoveType move_type) = 0;

    /**
     * @brief Used to generate an LIR that stores from a register to memory.
     * @param r_base The base physical register for the memory operand.
     * @param offset The displacement for the memory operand.
     * @param r_src The source physical register.
     * @param move_type Specification of the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                             MoveType move_type) = 0;

    /**
     * @brief Used for generating a conditional register-to-register operation.
     * @param op The opcode kind.
     * @param cc The condition code that, when true, will perform the opcode.
     * @param r_dest The destination physical register.
     * @param r_src The source physical register.
     * @return Returns the newly created LIR or null in case of creation failure.
     */
    virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;

    virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
    virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) = 0;
    virtual LIR* OpTestSuspend(LIR* target) = 0;
    virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
    virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
    virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
    virtual bool InexpensiveConstantLong(int64_t value) = 0;
    virtual bool InexpensiveConstantDouble(int64_t value) = 0;

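    // Example (hypothetical backend, for illustration only): the InexpensiveConstant* queries
    // above usually answer whether the constant can be materialized cheaply, e.g. by checking
    // that it fits in an instruction immediate:
    //
    //   bool SomeTargetMir2Lir::InexpensiveConstantInt(int32_t value) {
    //     return value >= -4096 && value < 4096;  // Hypothetical 13-bit signed immediate.
    //   }
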
    // May be optimized by targets.
    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

    // Temp workaround
    void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);

    virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;

  protected:
    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

    CompilationUnit* GetCompilationUnit() {
      return cu_;
    }
    /*
     * @brief Returns the index of the lowest set bit in 'x'.
     * @param x Value to be examined.
     * @returns The bit number of the lowest bit set in the value.
     */
    int32_t LowestSetBit(uint64_t x);
    /*
     * @brief Is this value a power of two?
     * @param x Value to be examined.
     * @returns 'true' if only 1 bit is set in the value.
     */
    bool IsPowerOfTwo(uint64_t x);
    /*
     * @brief Do these SRs overlap?
     * @param rl_op1 One RegLocation
     * @param rl_op2 The other RegLocation
     * @return 'true' if the VR pairs overlap
     *
     * Check to see if a result pair has a misaligned overlap with an operand pair. This
     * is not usual for dx to generate, but it is legal (for now). In a future rev of
     * dex, we'll want to make this case illegal.
     */
    bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);

    /*
     * @brief Force a location (in a register) into a temporary register.
     * @param loc Location of the result.
     * @returns The updated location.
     */
    virtual RegLocation ForceTemp(RegLocation loc);

    /*
     * @brief Force a wide location (in registers) into temporary registers.
     * @param loc Location of the result.
     * @returns The updated location.
     */
    virtual RegLocation ForceTempWide(RegLocation loc);

    static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
      return wide ? k64 : ref ? kReference : k32;
    }

    virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                    RegLocation rl_dest, RegLocation rl_src);

    void AddSlowPath(LIRSlowPath* slowpath);

    /*
     * @brief Generate code to set up instanceof for a class.
     * @param needs_access_check 'true' if we must check the access.
     * @param type_known_final 'true' if the type is known to be a final class.
     * @param type_known_abstract 'true' if the type is known to be an abstract class.
     * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
     * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
     * @param type_idx Type index to use if use_declaring_class is 'false'.
     * @param rl_dest Result to be set to 0 or 1.
     * @param rl_src Object to be tested.
     */
    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                    bool type_known_abstract, bool use_declaring_class,
                                    bool can_assume_type_is_in_dex_cache,
                                    uint32_t type_idx, RegLocation rl_dest,
                                    RegLocation rl_src);
    /*
     * @brief Generate the debug_frame FDE information if possible.
     * @returns Pointer to a vector containing call frame information, or NULL.
     */
    virtual std::vector<uint8_t>* ReturnCallFrameInformation();

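    // Example (for illustration only): IsPowerOfTwo() and LowestSetBit() above are the typical
    // building blocks when a division or multiplication by a constant is strength-reduced to a
    // shift, e.g.
    //
    //   if (IsPowerOfTwo(lit)) {
    //     int shift = LowestSetBit(lit);  // lit == 1 << shift.
    //     // ... emit a shift (plus a bias adjustment for signed division) instead of a divide.
    //   }
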
    /**
     * @brief Used to insert a marker that can be used to associate MIR with LIR.
     * @details Only inserts the marker if verbosity is enabled.
     * @param mir The MIR that is currently being generated.
     */
    void GenPrintLabel(MIR* mir);

    /**
     * @brief Used to generate the return sequence when there is no frame.
     * @details Assumes that the return registers have already been populated.
     */
    virtual void GenSpecialExitSequence() = 0;

    /**
     * @brief Used to generate code for special methods that are known to be
     * small enough to work in frameless mode.
     * @param bb The basic block of the first MIR.
     * @param mir The first MIR of the special method.
     * @param special Information about the special method.
     * @return Returns whether or not this was handled successfully. Returns false
     * if the caller should punt to normal MIR2LIR conversion.
     */
    virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

  protected:
    void ClobberBody(RegisterInfo* p);
    void SetCurrentDexPc(DexOffset dexpc) {
      current_dalvik_offset_ = dexpc;
    }

    /**
     * @brief Used to lock the register holding an argument if that argument was passed in a
     * register.
     * @details Does nothing if the argument is passed via the stack.
     * @param in_position The argument number whose register to lock.
     * @param wide Whether the argument is wide.
     */
    void LockArg(int in_position, bool wide = false);

    /**
     * @brief Used to load a VR argument to a physical register.
     * @details The load is only done if the argument is not already in a physical register.
     * LockArg must have been previously called.
     * @param in_position The argument number to load.
     * @param reg_class The register class into which to load the argument.
     * @param wide Whether the argument is 64-bit or not.
     * @return Returns the register (or register pair) for the loaded argument.
     */
    RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);

    /**
     * @brief Used to load a VR argument directly to a specified register location.
     * @param in_position The argument number to place in a register.
     * @param rl_dest The register location where to place the argument.
     */
    void LoadArgDirect(int in_position, RegLocation rl_dest);

    /**
     * @brief Used to generate LIR for a special getter method.
     * @param mir The MIR that represents the iget.
     * @param special Information about the special getter method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIGet(MIR* mir, const InlineMethod& special);

    /**
     * @brief Used to generate LIR for a special setter method.
     * @param mir The MIR that represents the iput.
     * @param special Information about the special setter method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

    /**
     * @brief Used to generate LIR for a special return-args method.
     * @param mir The MIR that represents the return of an argument.
     * @param special Information about the special return-args method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

    void AddDivZeroCheckSlowPath(LIR* branch);

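    // Example (hypothetical use, for illustration only): the documented protocol for the
    // argument helpers above is to pin the incoming argument before loading it, e.g.
    //
    //   LockArg(0);                                  // Argument 0, not wide.
    //   RegStorage reg = LoadArg(0, kCoreReg);       // Safe only after LockArg().
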
    // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
    // kArg2 as a temp.
    virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

    /**
     * @brief Load a constant into a RegLocation.
     * @param rl_dest Destination RegLocation.
     * @param value Constant value.
     */
    virtual void GenConst(RegLocation rl_dest, int value);

    /**
     * Returns true iff wide GPRs are just different views on the same physical register.
     */
    virtual bool WideGPRsAreAliases() = 0;

    /**
     * Returns true iff wide FPRs are just different views on the same physical register.
     */
    virtual bool WideFPRsAreAliases() = 0;

    enum class WidenessCheck {  // private
      kIgnoreWide,
      kCheckWide,
      kCheckNotWide
    };

    enum class RefCheck {  // private
      kIgnoreRef,
      kCheckRef,
      kCheckNotRef
    };

    enum class FPCheck {  // private
      kIgnoreFP,
      kCheckFP,
      kCheckNotFP
    };

    /**
     * Check whether a reg storage seems well-formed, that is, if the reg storage is valid,
     * check that it has the expected form for the given wideness, reference and FP checks.
     * For each check, the corresponding enum value selects whether the property is ignored,
     * must hold, or must not hold.
     */
    void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
                             bool fail, bool report) const;

    /**
     * Check whether a reg location seems well-formed, that is, if its reg storage is encoded,
     * check that it has the expected size.
     */
    void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;

    // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
    // kReportSizeError.
    void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
    // See CheckRegLocationImpl.
    void CheckRegLocation(RegLocation rl) const;

  public:
    // TODO: add accessors for these.
    LIR* literal_list_;             // Constants.
    LIR* method_literal_list_;      // Method literals requiring patching.
    LIR* class_literal_list_;       // Class literals requiring patching.
    LIR* code_literal_list_;        // Code literals requiring patching.
    LIR* first_fixup_;              // Doubly-linked list of LIR nodes requiring fixups.

  protected:
    CompilationUnit* const cu_;
    MIRGraph* const mir_graph_;
    GrowableArray<SwitchTable*> switch_tables_;
    GrowableArray<FillArrayData*> fill_array_data_;
    GrowableArray<RegisterInfo*> tempreg_info_;
    GrowableArray<RegisterInfo*> reginfo_map_;
    GrowableArray<void*> pointer_storage_;
    CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
    CodeOffset data_offset_;            // Starting offset of literal pool.
    size_t total_size_;                 // Header + code size.
    LIR* block_label_list_;
    PromotionMap* promotion_map_;
    /*
     * TODO: The code generation utilities don't have a built-in
     * mechanism to propagate the original Dalvik opcode address to the
     * associated generated instructions. For the trace compiler, this wasn't
     * necessary because the interpreter handled all throws and debugging
     * requests. For now we'll handle this by placing the Dalvik offset
     * in the CompilationUnit struct before codegen for each instruction.
     * The low-level LIR creation utilities will pull it from here. Rework this.
     */
    DexOffset current_dalvik_offset_;
    size_t estimated_native_code_size_;  // Just an estimate; used to reserve code_buffer_ size.
    RegisterPool* reg_pool_;
    /*
     * Sanity checking for the register temp tracking. The same SSA name should never be
     * associated with more than one temp register within the compilation of a single
     * instruction.
     */
    int live_sreg_;
    CodeBuffer code_buffer_;
    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
    std::vector<uint8_t> encoded_mapping_table_;
    std::vector<uint32_t> core_vmap_table_;
    std::vector<uint32_t> fp_vmap_table_;
    std::vector<uint8_t> native_gc_map_;
    int num_core_spills_;
    int num_fp_spills_;
    int frame_size_;
    unsigned int core_spill_mask_;
    unsigned int fp_spill_mask_;
    LIR* first_lir_insn_;
    LIR* last_lir_insn_;

    GrowableArray<LIRSlowPath*> slow_paths_;

    // The memory reference type for new LIRs.
    // NOTE: Passing this as an explicit parameter to all functions that directly or indirectly
    // invoke RawLIR() would clutter the code and reduce readability.
    ResourceMask::ResourceBit mem_ref_type_;

    // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
    // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
    // (i.e. 8 bytes on a 32-bit arch, 16 bytes on a 64-bit arch) and we use a ResourceMaskCache
    // to deduplicate the masks.
    ResourceMaskCache mask_cache_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_