mir_to_lir.h revision da96aeda912ff317de2c41e5a49bd244427238ac
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_ 18#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_ 19 20#include "arch/instruction_set.h" 21#include "compiled_method.h" 22#include "dex/compiler_enums.h" 23#include "dex/compiler_ir.h" 24#include "dex/reg_location.h" 25#include "dex/reg_storage.h" 26#include "dex/backend.h" 27#include "dex/quick/resource_mask.h" 28#include "driver/compiler_driver.h" 29#include "entrypoints/quick/quick_entrypoints_enum.h" 30#include "invoke_type.h" 31#include "leb128.h" 32#include "safe_map.h" 33#include "utils/array_ref.h" 34#include "utils/arena_allocator.h" 35#include "utils/arena_containers.h" 36#include "utils/arena_object.h" 37#include "utils/stack_checks.h" 38 39namespace art { 40 41// Set to 1 to measure cost of suspend check. 
#define NO_SUSPEND 0

// LIR instruction attribute bits.  Each names a single bit position (the k* enumerators
// come from compiler_enums.h) used in the per-opcode flags of the encoding maps.
#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
#define IS_MOVE (1ULL << kIsMoveOp)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp)
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
#define IS_VOLATILE (1ULL << kMemVolatile)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
#define REG_DEF1 (1ULL << kRegDef1)
#define REG_DEF2 (1ULL << kRegDef2)
#define REG_DEFA (1ULL << kRegDefA)
#define REG_DEFD (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0 (1ULL << kRegDefList0)
#define REG_DEF_LIST1 (1ULL << kRegDefList1)
#define REG_DEF_LR (1ULL << kRegDefLR)
#define REG_DEF_SP (1ULL << kRegDefSP)
#define REG_USE0 (1ULL << kRegUse0)
#define REG_USE1 (1ULL << kRegUse1)
#define REG_USE2 (1ULL << kRegUse2)
#define REG_USE3 (1ULL << kRegUse3)
#define REG_USE4 (1ULL << kRegUse4)
#define REG_USEA (1ULL << kRegUseA)
#define REG_USEC (1ULL << kRegUseC)
#define REG_USED (1ULL << kRegUseD)
#define REG_USEB (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0 (1ULL << kRegUseList0)
#define REG_USE_LIST1 (1ULL << kRegUseList1)
#define REG_USE_LR (1ULL << kRegUseLR)
#define REG_USE_PC (1ULL << kRegUsePC)
#define REG_USE_SP (1ULL << kRegUseSP)
#define SETS_CCODES (1ULL << kSetsCCodes)
#define USES_CCODES (1ULL << kUsesCCodes)
#define USE_FP_STACK (1ULL << kUseFpStack)
#define REG_USE_LO (1ULL << kUseLo)
#define REG_USE_HI (1ULL << kUseHi)
#define REG_DEF_LO (1ULL << kDefLo)
#define REG_DEF_HI (1ULL << kDefHi)
#define SCALED_OFFSET_X0 (1ULL << kMemScaledx0)
#define SCALED_OFFSET_X2 (1ULL << kMemScaledx2)
#define SCALED_OFFSET_X4 (1ULL << kMemScaledx4)

// Special load/stores
#define IS_LOADX (IS_LOAD | IS_VOLATILE)
#define IS_LOAD_OFF (IS_LOAD | SCALED_OFFSET_X0)
#define IS_LOAD_OFF2 (IS_LOAD | SCALED_OFFSET_X2)
#define IS_LOAD_OFF4 (IS_LOAD | SCALED_OFFSET_X4)

#define IS_STOREX (IS_STORE | IS_VOLATILE)
#define IS_STORE_OFF (IS_STORE | SCALED_OFFSET_X0)
#define IS_STORE_OFF2 (IS_STORE | SCALED_OFFSET_X2)
#define IS_STORE_OFF4 (IS_STORE | SCALED_OFFSET_X4)

// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
#define REG_DEF012 (REG_DEF0 | REG_DEF1 | REG_DEF2)
#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123 (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA (REG_DEFA | REG_USEA)
#define REG_USE012 (REG_USE01 | REG_USE2)
#define REG_USE014 (REG_USE01 | REG_USE4)
#define REG_USE01 (REG_USE0 | REG_USE1)
#define REG_USE02 (REG_USE0 | REG_USE2)
#define REG_USE12 (REG_USE1 | REG_USE2)
#define REG_USE23 (REG_USE2 | REG_USE3)
#define REG_USE123 (REG_USE1 | REG_USE2 | REG_USE3)

// TODO: #includes need a cleanup
#ifndef INVALID_SREG
#define INVALID_SREG (-1)
#endif

class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
class MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class MirMethodLoweringInfo;
class Mir2Lir;

// Callback used by the invoke-lowering code to emit the next instruction of a
// call sequence; returns the updated sequence state.
typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

// Raw output buffer for assembled machine code.
typedef std::vector<uint8_t> CodeBuffer;

struct UseDefMasks {
  const ResourceMask* use_mask;        // Resource mask for use.
  const ResourceMask* def_mask;        // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;  // Chain of LIR nodes needing pc relative fixups.
};

// One node of the doubly-linked low-level IR list built by the quick backend.
struct LIR {
  CodeOffset offset;               // Offset of this instruction.
  NarrowDexOffset dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;    // For Dalvik register disambiguation.
    bool is_nop:1;                 // LIR is optimized away.
    unsigned int size:4;           // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;        // If true, masks should not be used.
    unsigned int generation:1;     // Used to track visitation state during fixup pass.
    unsigned int fixup:8;          // Fixup kind.
  } flags;
  union {
    UseDefMasks m;                 // Use & Def masks used during optimization.
    AssemblyInfo a;                // Instruction info used during assembly phase.
  } u;
  int32_t operands[5];             // [0..4] = [dest, src1, src2, extra, extra2].
};

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))

#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
  do { \
    low_reg = both_regs & 0xff; \
    high_reg = (both_regs >> 8) & 0xff; \
  } while (false)

// Mask to denote sreg as the start of a 64-bit item.  Must not interfere with low 16 bits.
#define STARTING_WIDE_SREG 0x10000

// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))

class Mir2Lir : public Backend {
 public:
  static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
  static constexpr bool kReportSizeError = true && kIsDebugBuild;

  // TODO: If necessary, this could be made target-dependent.
  static constexpr uint16_t kSmallSwitchThreshold = 5;

  /*
   * Auxiliary information describing the location of data embedded in the Dalvik
   * byte code stream.
   */
  struct EmbeddedData {
    CodeOffset offset;        // Code offset of data block.
    const uint16_t* table;    // Original dex data.
    DexOffset vaddr;          // Dalvik offset of parent opcode.
  };

  struct FillArrayData : EmbeddedData {
    int32_t size;
  };

  struct SwitchTable : EmbeddedData {
    LIR* anchor;              // Reference instruction for relative offsets.
    LIR** targets;            // Array of case targets.
  };

  /* Static register use counts */
  struct RefCounts {
    int count;
    int s_reg;
  };

  /*
   * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
   * and native register storage.
   * The primary purpose is to reuse previously
   * loaded values, if possible, and otherwise to keep the value in register
   * storage as long as possible.
   *
   * NOTE 1: wide_value refers to the width of the Dalvik value contained in
   * this register (or pair).  For example, a 64-bit register containing a 32-bit
   * Dalvik value would have wide_value==false even though the storage container itself
   * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
   * would have wide_value==true (and additionally would have its partner field set to the
   * other half whose wide_value field would also be true).
   *
   * NOTE 2: In the case of a register pair, you can determine which of the partners
   * is the low half by looking at the s_reg names.  The high s_reg will equal low_sreg + 1.
   *
   * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
   * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
   * value, and the s_reg of the high word is implied (s_reg + 1).
   *
   * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false no
   * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
   * If is_temp==true and live==false, no other fields have
   * meaning.  If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
   * and def_end describe the relationship between the temp register/register pair and
   * the Dalvik value[s] described by s_reg/s_reg+1.
   *
   * The fields used_storage, master_storage and storage_mask are used to track allocation
   * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
   * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
   * storage use.  For s4, it would be 0x00000001; for s5 0x00000002.  These values should not
   * change once initialized.  The "used_storage" field tracks current allocation status.
   * Although each record contains this field, only the field from the largest member of
   * an aliased group is used.  In our case, it would be d2's.  The master_storage pointer
   * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in a used_storage
   * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
   * Then, if we wanted to determine whether s4 could be allocated, we would "and"
   * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free and
   * to allocate: *master_storage |= storage_mask.  To free, *master_storage &= ~storage_mask.
   *
   * For an X86 vector register example, storage_mask would be:
   *    0x00000001 for 32-bit view of xmm1
   *    0x00000003 for 64-bit view of xmm1
   *    0x0000000f for 128-bit view of xmm1
   *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
   *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
   *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
   *
   * The "liveness" of a register is handled in a similar way.  The liveness_ storage is
   * held in the widest member of an aliased set.  Note, though, that for a temp register to
   * be reused as live, it must both be marked live and the associated SReg() must match the
   * desired s_reg.  This gets a little complicated when dealing with aliased registers.  All
   * members of an aliased set will share the same liveness flags, but each will individually
   * maintain s_reg_.  In this way we can know that at least one member of an
   * aliased set is live, but will only fully match on the appropriate alias view.  For example,
   * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9
   * because it is wide), its aliases s2 and s3 will show as live, but will have
   * s_reg_ == INVALID_SREG.  An attempt to later AllocLiveReg() of v9 with a single-precision
   * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
   * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
   * report that v9 is currently not live as a single (which is what we want).
   *
   * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
   * to treat xmm registers:
   *     1. Treat them all as 128-bits wide, but denote how much data used via bytes field.
   *         o This more closely matches reality, but means you'd need to be able to get
   *           to the associated RegisterInfo struct to figure out how it's being used.
   *         o This is how 64-bit core registers will be used - always 64 bits, but the
   *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
   *     2. View the xmm registers based on contents.
   *         o A single in a xmm2 register would be k32BitVector, while a double in xmm2 would
   *           be a k64BitVector.
   *         o Note that the two uses above would be considered distinct registers (but with
   *           the aliasing mechanism, we could detect interference).
   *         o This is how aliased double and single float registers will be handled on
   *           Arm and MIPS.
   * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
   * mechanism 2 for aliased float registers and x86 vector registers.
   */
  class RegisterInfo : public ArenaObject<kArenaAllocRegAlloc> {
   public:
    RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
    ~RegisterInfo() {}

    // Storage masks for the supported register view widths (1 bit per 32 bits of storage).
    static const uint32_t k32SoloStorageMask = 0x00000001;
    static const uint32_t kLowSingleStorageMask = 0x00000001;
    static const uint32_t kHighSingleStorageMask = 0x00000002;
    static const uint32_t k64SoloStorageMask = 0x00000003;
    static const uint32_t k128SoloStorageMask = 0x0000000f;
    static const uint32_t k256SoloStorageMask = 0x000000ff;
    static const uint32_t k512SoloStorageMask = 0x0000ffff;
    static const uint32_t k1024SoloStorageMask = 0xffffffff;

    // Allocation state is always kept in the master of the aliased set.
    bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
    void MarkInUse() { master_->used_storage_ |= storage_mask_; }
    void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
    // No part of the containing storage is live in this view.
    bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
    // Liveness of this view matches.  Note: not equivalent to !IsDead().
    bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
    void MarkLive(int s_reg) {
      // TODO: Anything useful to assert here?
      s_reg_ = s_reg;
      master_->liveness_ |= storage_mask_;
    }
    void MarkDead() {
      if (SReg() != INVALID_SREG) {
        s_reg_ = INVALID_SREG;
        master_->liveness_ &= ~storage_mask_;
        ResetDefBody();
      }
    }
    RegStorage GetReg() { return reg_; }
    void SetReg(RegStorage reg) { reg_ = reg; }
    bool IsTemp() { return is_temp_; }
    void SetIsTemp(bool val) { is_temp_ = val; }
    bool IsWide() { return wide_value_; }
    void SetIsWide(bool val) {
      wide_value_ = val;
      if (!val) {
        // If not wide, reset partner to self.
        SetPartner(GetReg());
      }
    }
    bool IsDirty() { return dirty_; }
    void SetIsDirty(bool val) { dirty_ = val; }
    RegStorage Partner() { return partner_; }
    void SetPartner(RegStorage partner) { partner_ = partner; }
    // A temp's name is only meaningful while it is live.
    int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
    const ResourceMask& DefUseMask() { return def_use_mask_; }
    void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
    RegisterInfo* Master() { return master_; }
    // Registering with a new master links this record into the master's alias chain.
    void SetMaster(RegisterInfo* master) {
      master_ = master;
      if (master != this) {
        master_->aliased_ = true;
        DCHECK(alias_chain_ == nullptr);
        alias_chain_ = master_->alias_chain_;
        master_->alias_chain_ = this;
      }
    }
    bool IsAliased() { return aliased_; }
    RegisterInfo* GetAliasChain() { return alias_chain_; }
    uint32_t StorageMask() { return storage_mask_; }
    void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
    LIR* DefStart() { return def_start_; }
    void SetDefStart(LIR* def_start) { def_start_ = def_start; }
    LIR* DefEnd() { return def_end_; }
    void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
    void ResetDefBody() { def_start_ = def_end_ = nullptr; }
    // Find member of aliased set matching storage_used; return nullptr if none.
    RegisterInfo* FindMatchingView(uint32_t storage_used) {
      RegisterInfo* res = Master();
      for (; res != nullptr; res = res->GetAliasChain()) {
        if (res->StorageMask() == storage_used)
          break;
      }
      return res;
    }

   private:
    RegStorage reg_;
    bool is_temp_;               // Can allocate as temp?
    bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
    bool dirty_;                 // If live, is it dirty?
    bool aliased_;               // Is this the master for other aliased RegisterInfo's?
    RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
    int s_reg_;                  // Name of live value.
    ResourceMask def_use_mask_;  // Resources for this element.
    uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
    uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
    RegisterInfo* master_;       // Pointer to controlling storage mask.
    uint32_t storage_mask_;      // Track allocation of sub-units.
    LIR *def_start_;             // Starting inst in last def sequence.
    LIR *def_end_;               // Ending inst in last def sequence.
    RegisterInfo* alias_chain_;  // Chain of aliased registers.
  };

  // Per-compilation pool of allocatable registers, split by register class.
  class RegisterPool : public DeletableArenaObject<kArenaAllocRegAlloc> {
   public:
    RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
                 const ArrayRef<const RegStorage>& core_regs,
                 const ArrayRef<const RegStorage>& core64_regs,
                 const ArrayRef<const RegStorage>& sp_regs,
                 const ArrayRef<const RegStorage>& dp_regs,
                 const ArrayRef<const RegStorage>& reserved_regs,
                 const ArrayRef<const RegStorage>& reserved64_regs,
                 const ArrayRef<const RegStorage>& core_temps,
                 const ArrayRef<const RegStorage>& core64_temps,
                 const ArrayRef<const RegStorage>& sp_temps,
                 const ArrayRef<const RegStorage>& dp_temps);
    ~RegisterPool() {}
    // Reset the round-robin temp allocation cursors.
    void ResetNextTemp() {
      next_core_reg_ = 0;
      next_sp_reg_ = 0;
      next_dp_reg_ = 0;
    }
    ArenaVector<RegisterInfo*> core_regs_;
    int next_core_reg_;
    ArenaVector<RegisterInfo*> core64_regs_;
    int next_core64_reg_;
    ArenaVector<RegisterInfo*> sp_regs_;    // Single precision float.
    int next_sp_reg_;
    ArenaVector<RegisterInfo*> dp_regs_;    // Double precision float.
    int next_dp_reg_;
    ArenaVector<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_
    int* next_ref_reg_;

   private:
    Mir2Lir* const m2l_;
  };

  // Records where a Dalvik register was promoted (core and/or fp side).
  struct PromotionMap {
    RegLocationType core_location:3;
    uint8_t core_reg;
    RegLocationType fp_location:3;
    uint8_t fp_reg;
    bool first_in_pair;
  };

  //
  // Slow paths.  This object is used to generate a sequence of code that is executed in the
  // slow path.  For example, resolving a string or class is slow as it will only be executed
  // once (after that it is resolved and doesn't need to be done again).  We want slow paths
  // to be placed out-of-line, and not require a (mispredicted, probably) conditional forward
  // branch over them.
  //
  // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
  // the Compile() function that will be called near the end of the code generated by the
  // method.
  //
  // The basic flow for a slow path is:
  //
  //     CMP reg, #value
  //     BEQ fromfast
  //   cont:
  //     ...
  //     fast path code
  //     ...
  //     more code
  //     ...
  //     RETURN
  //
  //   fromfast:
  //     ...
  //     slow path code
  //     ...
  //     B cont
  //
  // So you see we need two labels and two branches.  The first branch (called fromfast) is
  // the conditional branch to the slow path code.  The second label (called cont) is used
  // as an unconditional branch target for getting back to the code after the slow path
  // has completed.
  //

  class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
   public:
    LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                LIR* cont = nullptr) :
      m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
    }
    virtual ~LIRSlowPath() {}
    // Emits the out-of-line code; called near the end of method code generation.
    virtual void Compile() = 0;

    LIR *GetContinuationLabel() {
      return cont_;
    }

    LIR *GetFromFast() {
      return fromfast_;
    }

   protected:
    LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);

    Mir2Lir* const m2l_;
    CompilationUnit* const cu_;
    const DexOffset current_dex_pc_;
    LIR* const fromfast_;   // Entry branch from the fast path.
    LIR* const cont_;       // Continuation label back in the fast path (may be null).
  };

  // Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_.
526 class ScopedMemRefType { 527 public: 528 ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type) 529 : m2l_(m2l), 530 old_mem_ref_type_(m2l->mem_ref_type_) { 531 m2l_->mem_ref_type_ = new_mem_ref_type; 532 } 533 534 ~ScopedMemRefType() { 535 m2l_->mem_ref_type_ = old_mem_ref_type_; 536 } 537 538 private: 539 Mir2Lir* const m2l_; 540 ResourceMask::ResourceBit old_mem_ref_type_; 541 542 DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType); 543 }; 544 545 virtual ~Mir2Lir() {} 546 547 /** 548 * @brief Decodes the LIR offset. 549 * @return Returns the scaled offset of LIR. 550 */ 551 virtual size_t GetInstructionOffset(LIR* lir); 552 553 int32_t s4FromSwitchData(const void* switch_data) { 554 return *reinterpret_cast<const int32_t*>(switch_data); 555 } 556 557 /* 558 * TODO: this is a trace JIT vestige, and its use should be reconsidered. At the time 559 * it was introduced, it was intended to be a quick best guess of type without having to 560 * take the time to do type analysis. Currently, though, we have a much better idea of 561 * the types of Dalvik virtual registers. Instead of using this for a best guess, why not 562 * just use our knowledge of type to select the most appropriate register class? 563 */ 564 RegisterClass RegClassBySize(OpSize size) { 565 if (size == kReference) { 566 return kRefReg; 567 } else { 568 return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || 569 size == kSignedByte) ? kCoreReg : kAnyReg; 570 } 571 } 572 573 size_t CodeBufferSizeInBytes() { 574 return code_buffer_.size() / sizeof(code_buffer_[0]); 575 } 576 577 static bool IsPseudoLirOp(int opcode) { 578 return (opcode < 0); 579 } 580 581 /* 582 * LIR operands are 32-bit integers. Sometimes, (especially for managing 583 * instructions which require PC-relative fixups), we need the operands to carry 584 * pointers. To do this, we assign these pointers an index in pointer_storage_, and 585 * hold that index in the operand array. 
586 * TUNING: If use of these utilities becomes more common on 32-bit builds, it 587 * may be worth conditionally-compiling a set of identity functions here. 588 */ 589 uint32_t WrapPointer(void* pointer) { 590 uint32_t res = pointer_storage_.size(); 591 pointer_storage_.push_back(pointer); 592 return res; 593 } 594 595 void* UnwrapPointer(size_t index) { 596 return pointer_storage_[index]; 597 } 598 599 // strdup(), but allocates from the arena. 600 char* ArenaStrdup(const char* str) { 601 size_t len = strlen(str) + 1; 602 char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc)); 603 if (res != NULL) { 604 strncpy(res, str, len); 605 } 606 return res; 607 } 608 609 // Shared by all targets - implemented in codegen_util.cc 610 void AppendLIR(LIR* lir); 611 void InsertLIRBefore(LIR* current_lir, LIR* new_lir); 612 void InsertLIRAfter(LIR* current_lir, LIR* new_lir); 613 614 /** 615 * @brief Provides the maximum number of compiler temporaries that the backend can/wants 616 * to place in a frame. 617 * @return Returns the maximum number of compiler temporaries. 618 */ 619 size_t GetMaxPossibleCompilerTemps() const; 620 621 /** 622 * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries. 623 * @return Returns the size in bytes for space needed for compiler temporary spill region. 
624 */ 625 size_t GetNumBytesForCompilerTempSpillRegion(); 626 627 DexOffset GetCurrentDexPc() const { 628 return current_dalvik_offset_; 629 } 630 631 RegisterClass ShortyToRegClass(char shorty_type); 632 RegisterClass LocToRegClass(RegLocation loc); 633 int ComputeFrameSize(); 634 virtual void Materialize(); 635 virtual CompiledMethod* GetCompiledMethod(); 636 void MarkSafepointPC(LIR* inst); 637 void MarkSafepointPCAfter(LIR* after); 638 void SetupResourceMasks(LIR* lir); 639 void SetMemRefType(LIR* lir, bool is_load, int mem_type); 640 void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit); 641 void SetupRegMask(ResourceMask* mask, int reg); 642 void ClearRegMask(ResourceMask* mask, int reg); 643 void DumpLIRInsn(LIR* arg, unsigned char* base_addr); 644 void EliminateLoad(LIR* lir, int reg_id); 645 void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type); 646 void DumpPromotionMap(); 647 void CodegenDump(); 648 LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0, 649 int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL); 650 LIR* NewLIR0(int opcode); 651 LIR* NewLIR1(int opcode, int dest); 652 LIR* NewLIR2(int opcode, int dest, int src1); 653 LIR* NewLIR2NoDest(int opcode, int src, int info); 654 LIR* NewLIR3(int opcode, int dest, int src1, int src2); 655 LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info); 656 LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2); 657 LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta); 658 LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi); 659 LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method); 660 LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx); 661 LIR* AddWordData(LIR* *constant_list_p, int value); 662 LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi); 663 void ProcessSwitchTables(); 664 void 
DumpSparseSwitchTable(const uint16_t* table); 665 void DumpPackedSwitchTable(const uint16_t* table); 666 void MarkBoundary(DexOffset offset, const char* inst_str); 667 void NopLIR(LIR* lir); 668 void UnlinkLIR(LIR* lir); 669 bool EvaluateBranch(Instruction::Code opcode, int src1, int src2); 670 bool IsInexpensiveConstant(RegLocation rl_src); 671 ConditionCode FlipComparisonOrder(ConditionCode before); 672 ConditionCode NegateComparison(ConditionCode before); 673 virtual void InstallLiteralPools(); 674 void InstallSwitchTables(); 675 void InstallFillArrayData(); 676 bool VerifyCatchEntries(); 677 void CreateMappingTables(); 678 void CreateNativeGcMap(); 679 int AssignLiteralOffset(CodeOffset offset); 680 int AssignSwitchTablesOffset(CodeOffset offset); 681 int AssignFillArrayDataOffset(CodeOffset offset); 682 virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal); 683 virtual void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec); 684 void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec); 685 686 // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated. 687 virtual RegLocation NarrowRegLoc(RegLocation loc); 688 689 // Shared by all targets - implemented in local_optimizations.cc 690 void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src); 691 void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir); 692 void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir); 693 virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir); 694 695 // Shared by all targets - implemented in ralloc_util.cc 696 int GetSRegHi(int lowSreg); 697 bool LiveOut(int s_reg); 698 void SimpleRegAlloc(); 699 void ResetRegPool(); 700 void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num); 701 void DumpRegPool(ArenaVector<RegisterInfo*>* regs); 702 void DumpCoreRegPool(); 703 void DumpFpRegPool(); 704 void DumpRegPools(); 705 /* Mark a temp register as dead. Does not affect allocation state. 
*/ 706 void Clobber(RegStorage reg); 707 void ClobberSReg(int s_reg); 708 void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask); 709 int SRegToPMap(int s_reg); 710 void RecordCorePromotion(RegStorage reg, int s_reg); 711 RegStorage AllocPreservedCoreReg(int s_reg); 712 void RecordFpPromotion(RegStorage reg, int s_reg); 713 RegStorage AllocPreservedFpReg(int s_reg); 714 virtual RegStorage AllocPreservedSingle(int s_reg); 715 virtual RegStorage AllocPreservedDouble(int s_reg); 716 RegStorage AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required); 717 virtual RegStorage AllocTemp(bool required = true); 718 virtual RegStorage AllocTempWide(bool required = true); 719 virtual RegStorage AllocTempRef(bool required = true); 720 virtual RegStorage AllocTempSingle(bool required = true); 721 virtual RegStorage AllocTempDouble(bool required = true); 722 virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true); 723 virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true); 724 void FlushReg(RegStorage reg); 725 void FlushRegWide(RegStorage reg); 726 RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide); 727 RegStorage FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg); 728 virtual void FreeTemp(RegStorage reg); 729 virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free); 730 virtual bool IsLive(RegStorage reg); 731 virtual bool IsTemp(RegStorage reg); 732 bool IsPromoted(RegStorage reg); 733 bool IsDirty(RegStorage reg); 734 virtual void LockTemp(RegStorage reg); 735 void ResetDef(RegStorage reg); 736 void NullifyRange(RegStorage reg, int s_reg); 737 void MarkDef(RegLocation rl, LIR *start, LIR *finish); 738 void MarkDefWide(RegLocation rl, LIR *start, LIR *finish); 739 void ResetDefLoc(RegLocation rl); 740 void ResetDefLocWide(RegLocation rl); 741 void ResetDefTracking(); 742 void ClobberAllTemps(); 743 void FlushSpecificReg(RegisterInfo* info); 744 void 
FlushAllRegs(); 745 bool RegClassMatches(int reg_class, RegStorage reg); 746 void MarkLive(RegLocation loc); 747 void MarkTemp(RegStorage reg); 748 void UnmarkTemp(RegStorage reg); 749 void MarkWide(RegStorage reg); 750 void MarkNarrow(RegStorage reg); 751 void MarkClean(RegLocation loc); 752 void MarkDirty(RegLocation loc); 753 void MarkInUse(RegStorage reg); 754 bool CheckCorePoolSanity(); 755 virtual RegLocation UpdateLoc(RegLocation loc); 756 virtual RegLocation UpdateLocWide(RegLocation loc); 757 RegLocation UpdateRawLoc(RegLocation loc); 758 759 /** 760 * @brief Used to prepare a register location to receive a wide value. 761 * @see EvalLoc 762 * @param loc the location where the value will be stored. 763 * @param reg_class Type of register needed. 764 * @param update Whether the liveness information should be updated. 765 * @return Returns the properly typed temporary in physical register pairs. 766 */ 767 virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update); 768 769 /** 770 * @brief Used to prepare a register location to receive a value. 771 * @param loc the location where the value will be stored. 772 * @param reg_class Type of register needed. 773 * @param update Whether the liveness information should be updated. 774 * @return Returns the properly typed temporary in physical register. 775 */ 776 virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update); 777 778 void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs); 779 void DumpCounts(const RefCounts* arr, int size, const char* msg); 780 void DoPromotion(); 781 int VRegOffset(int v_reg); 782 int SRegOffset(int s_reg); 783 RegLocation GetReturnWide(RegisterClass reg_class); 784 RegLocation GetReturn(RegisterClass reg_class); 785 RegisterInfo* GetRegInfo(RegStorage reg); 786 787 // Shared by all targets - implemented in gen_common.cc. 
    // Shared code-generation helpers: runtime checks (null, bounds, div-zero),
    // field/array accessors and arithmetic lowering.
    void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
    virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                  RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
    virtual void HandleSlowPaths();
    void GenBarrier();
    void GenDivZeroException();
    // c_code holds condition code that's generated from testing divisor against 0.
    void GenDivZeroCheck(ConditionCode c_code);
    // reg holds divisor.
    void GenDivZeroCheck(RegStorage reg);
    void GenArrayBoundsCheck(RegStorage index, RegStorage length);
    void GenArrayBoundsCheck(int32_t index, RegStorage length);
    LIR* GenNullCheck(RegStorage reg);
    void MarkPossibleNullPointerException(int opt_flags);
    void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
    void MarkPossibleStackOverflowException();
    void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
    LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
    LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
    virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
                             LIR* taken);
    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
    virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src);
    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                     RegLocation rl_src);
    void GenFilledNewArray(CallInfo* info);
    void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    void GenSput(MIR* mir, RegLocation rl_src, OpSize size);
    // Get entrypoints are specific for types, size alone is not sufficient to safely infer
    // entrypoint.
    void GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type);
    void GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
                 RegLocation rl_dest, RegLocation rl_obj);
    void GenIPut(MIR* mir, int opt_flags, OpSize size,
                 RegLocation rl_src, RegLocation rl_obj);
    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                        RegLocation rl_src);

    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
    void GenThrow(RegLocation rl_src);
    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
    virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src1, RegLocation rl_shift);
    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src, int lit);
    virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src1, RegLocation rl_src2, int flags);
    void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
                           RegLocation rl_src);
    virtual void GenSuspendTest(int opt_flags);
    virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);

    // This will be overridden by x86 implementation.
    virtual void GenConstWide(RegLocation rl_dest, int64_t value);
    virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src1, RegLocation rl_src2, int flags);

    // Shared by all targets - implemented in gen_invoke.cc.
    // Runtime-helper call plumbing. The CallRuntimeHelper* overloads marshal the given
    // argument combination into the target calling convention before the call; 'safepoint_pc'
    // selects whether a safepoint is recorded at the call site.
    LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                    bool use_link = true);
    RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);

    void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
    void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
    void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
    void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                      bool safepoint_pc);
    void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                         RegLocation arg1, bool safepoint_pc);
    void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
                                         int arg1, bool safepoint_pc);
    void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
    void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                    bool safepoint_pc);
    void CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                                 RegLocation arg1, bool safepoint_pc);
    void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                    RegStorage arg1, int arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
                                       bool safepoint_pc);
    void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                    RegLocation arg1, RegLocation arg2,
                                                    bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                            RegLocation arg0, RegLocation arg1,
                                                            RegLocation arg2,
                                                            bool safepoint_pc);
    void GenInvoke(CallInfo* info);
    void GenInvokeNoInline(CallInfo* info);
    virtual NextCallInsn GetNextSDCallInsn();

    /*
     * @brief Generate the actual call insn based on the method info.
     * @param method_info the lowering info for the method call.
     * @returns Call instruction
     */
    virtual LIR* GenCallInsn(const MirMethodLoweringInfo& method_info);

    virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
    virtual int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                     NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx,
                                     uintptr_t direct_code, uintptr_t direct_method,
                                     InvokeType type, bool skip_this);
    virtual int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                   NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx,
                                   uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this);

    /**
     * @brief Used to determine the register location of destination.
     * @details This is needed during generation of inline intrinsics because it finds the
     * destination of the return, either the physical register or the target of move-result.
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTarget(CallInfo* info);

    /**
     * @brief Used to determine the wide register location of destination.
     * @see InlineTarget
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTargetWide(CallInfo* info);

    // Intrinsic lowering. Each returns whether the intrinsic was successfully inlined;
    // pure-virtual entries must be provided by every target backend.
    bool GenInlinedReferenceGetReferent(CallInfo* info);
    virtual bool GenInlinedCharAt(CallInfo* info);
    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
    virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
    bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
    virtual bool GenInlinedAbsInt(CallInfo* info);
    virtual bool GenInlinedAbsLong(CallInfo* info);
    virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
    virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
    bool GenInlinedFloatCvt(CallInfo* info);
    bool GenInlinedDoubleCvt(CallInfo* info);
    virtual bool GenInlinedCeil(CallInfo* info);
    virtual bool GenInlinedFloor(CallInfo* info);
    virtual bool GenInlinedRint(CallInfo* info);
    virtual bool GenInlinedRound(CallInfo* info, bool is_double);
    virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
    virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
    bool GenInlinedStringCompareTo(CallInfo* info);
    virtual bool GenInlinedCurrentThread(CallInfo* info);
    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                             bool is_volatile, bool is_ordered);
    virtual int LoadArgRegs(CallInfo* info, int call_state,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                            bool skip_this);

    // Shared by all targets - implemented in gen_loadstore.cc.
    RegLocation LoadCurrMethod();
    void LoadCurrMethodDirect(RegStorage r_tgt);
    virtual LIR* LoadConstant(RegStorage r_dest, int value);
    // Natural word size.
    // Convenience wrappers over LoadBaseDisp/LoadBaseIndexed with a fixed size/volatility.
    virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
    }
    // Load 8 bits, regardless of target.
    virtual LIR* Load8Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, kSignedByte, kNotVolatile);
    }
    // Load 32 bits, regardless of target.
    virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
      return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
    }
    // Load a reference at base + displacement and decompress into register.
    virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                             VolatileKind is_volatile) {
      return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
    }
    // Load a reference at base + index and decompress into register.
    virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                int scale) {
      return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
    }
    // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
    virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
    // Load Dalvik value with 64-bit memory storage.
    virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
    // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
    virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
    // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
    virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
    // Load Dalvik value with 64-bit memory storage.
    virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
    // Load Dalvik value with 64-bit memory storage.
    virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
    // Store an item of natural word size.
    virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
      return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
    }
    // Store an uncompressed reference into a compressed 32-bit container.
    virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
                              VolatileKind is_volatile) {
      return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
    }
    // Store an uncompressed reference into a compressed 32-bit container by index.
    virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                 int scale) {
      return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
    }
    // Store 32 bits, regardless of target.
    virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
      return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
    }

    /**
     * @brief Used to do the final store in the destination as per bytecode semantics.
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. Can be either physical register or dalvik
     *               register.
     */
    virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. Can be either physical register or dalvik
     *               register.
     */
    virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store to a destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. It must be kLocPhysReg
     *
     * This is used for x86 two operand computations, where we have computed the correct
     * register value that now needs to be properly registered. This is used to avoid an
     * extra register copy that would result if StoreValue was called.
     */
    virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValueWide
     * @param rl_dest The destination dalvik register location.
     * @param rl_src The source register location. It must be kLocPhysReg
     *
     * This is used for x86 two operand computations, where we have computed the correct
     * register values that now need to be properly registered. This is used to avoid an
     * extra pair of register copies that would result if StoreValueWide was called.
     */
    virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);

    // Shared by all targets - implemented in mir_to_lir.cc.
    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
    virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    bool MethodBlockCodeGen(BasicBlock* bb);
    bool SpecialMIR2LIR(const InlineMethod& special);
    virtual void MethodMIR2LIR();
    // Update LIR for verbose listings.
    void UpdateLIROffsets();

    /**
     * @brief Mark a garbage collection card. Skip if the stored value is null.
     * @param val_reg the register holding the stored value to check against null.
     * @param tgt_addr_reg the address of the object or array where the value was stored.
     */
    void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);

    /*
     * @brief Load the address of the dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param register that will contain the code address.
     * @note register will be passed to TargetReg to get physical register.
     */
    void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                         SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Method* of a dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param register that will contain the code address.
     * @note register will be passed to TargetReg to get physical register.
     */
    virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Class* of a Dex Class type into the register.
     * @param dex DexFile that contains the class type.
     * @param type How the method will be invoked.
     * @param register that will contain the code address.
     * @note register will be passed to TargetReg to get physical register.
     */
    virtual void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
                               SpecialTargetRegister symbolic_reg);

    // Routines that work for the generic case, but may be overridden by target.
    /*
     * @brief Compare memory to immediate, and branch if condition true.
     * @param cond The condition code that when true will branch to the target.
     * @param temp_reg A temporary register that can be used if compare to memory is not
     * supported by the architecture.
     * @param base_reg The register holding the base address.
     * @param offset The offset from the base.
     * @param check_value The immediate to compare to.
     * @param target branch target (or nullptr)
     * @param compare output for getting LIR for comparison (or nullptr)
     * @returns The branch instruction that was generated.
     */
    virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target, LIR** compare);

    // Required for target - codegen helpers.
    virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
                                            int32_t constant) = 0;
    virtual void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
                                             int64_t constant) = 0;
    virtual LIR* CheckSuspendUsingLoad() = 0;

    virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;

    // Primitive memory-access hooks every backend must implement; the inline wrappers
    // above all funnel into these.
    virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) = 0;
    virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) = 0;
    virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
    virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
    virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                               OpSize size, VolatileKind is_volatile) = 0;
    virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) = 0;

    /**
     * @brief Unconditionally mark a garbage collection card.
     * @param tgt_addr_reg the address of the object or array where the value was stored.
     */
    virtual void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) = 0;

    // Required for target - register utilities.
    // Two storage units denote the same physical register if they share a master
    // register and their storage masks overlap.
    bool IsSameReg(RegStorage reg1, RegStorage reg2) {
      RegisterInfo* info1 = GetRegInfo(reg1);
      RegisterInfo* info2 = GetRegInfo(reg2);
      return (info1->Master() == info2->Master() &&
              (info1->StorageMask() & info2->StorageMask()) != 0);
    }

    static constexpr bool IsWide(OpSize size) {
      return size == k64 || size == kDouble;
    }

    static constexpr bool IsRef(OpSize size) {
      return size == kReference;
    }

    /**
     * @brief Portable way of getting special registers from the backend.
     * @param reg Enumeration describing the purpose of the register.
     * @return Return the #RegStorage corresponding to the given purpose @p reg.
     * @note This function is currently allowed to return any suitable view of the registers
     * (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
     */
    virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;

    /**
     * @brief Portable way of getting special registers from the backend.
     * @param reg Enumeration describing the purpose of the register.
     * @param wide_kind What kind of view of the special register is required.
     * @return Return the #RegStorage corresponding to the given purpose @p reg.
     *
     * @note For 32b system, wide (kWide) views only make sense for the argument registers and the
     * return. In that case, this function should return a pair where the first component of
     * the result will be the indicated special register.
     */
    virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
      if (wide_kind == kWide) {
        DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) ||
               (kRet0 == reg));
        static_assert((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
                      (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
                      (kArg7 == kArg6 + 1), "kargs range unexpected");
        static_assert((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
                      (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
                      (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
                      (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
                      (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
                      (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
                      "kfargs range unexpected");
        static_assert(kRet1 == kRet0 + 1, "kret range unexpected");
        // The enumerator at reg + 1 supplies the high half of the pair; the static_asserts
        // above guarantee the enumerators are contiguous so this arithmetic is valid.
        return RegStorage::MakeRegPair(TargetReg(reg),
                                       TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
      } else {
        return TargetReg(reg);
      }
    }

    /**
     * @brief Portable way of getting a special register for storing a pointer.
     * @see TargetReg()
     */
    virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
      return TargetReg(reg);
    }

    // Get a reg storage corresponding to the wide & ref flags of the reg location.
    virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
      if (loc.ref) {
        return TargetReg(reg, kRef);
      } else {
        return TargetReg(reg, loc.wide ? kWide : kNotWide);
      }
    }

    virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
    virtual RegLocation GetReturnAlt() = 0;
    virtual RegLocation GetReturnWideAlt() = 0;
    virtual RegLocation LocCReturn() = 0;
    virtual RegLocation LocCReturnRef() = 0;
    virtual RegLocation LocCReturnDouble() = 0;
    virtual RegLocation LocCReturnFloat() = 0;
    virtual RegLocation LocCReturnWide() = 0;
    virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
    virtual void AdjustSpillMask() = 0;
    virtual void ClobberCallerSave() = 0;
    virtual void FreeCallTemps() = 0;
    virtual void LockCallTemps() = 0;
    virtual void CompilerInitializeRegAlloc() = 0;

    // Required for target - miscellaneous.
    virtual void AssembleLIR() = 0;
    virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) = 0;
    virtual const char* GetTargetInstFmt(int opcode) = 0;
    virtual const char* GetTargetInstName(int opcode) = 0;
    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;

    // Note: This may return kEncodeNone on architectures that do not expose a PC. The caller must
    // take care of this.
    virtual ResourceMask GetPCUseDefEncoding() const = 0;
    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
    virtual size_t GetInsnSize(LIR* lir) = 0;
    virtual bool IsUnconditionalBranch(LIR* lir) = 0;

    // Get the register class for load/store of a field.
    virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;

    // Required for target - Dalvik-level generators.
    // Dalvik-level generators every target backend must provide (pure virtual unless a
    // generic default exists).
    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2, int flags) = 0;
    virtual void GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2) = 0;
    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) = 0;
    virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;

    /**
     * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
     * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
     * that applies on integers. The generated code will write the smallest or largest value
     * directly into the destination register as specified by the invoke information.
     * @param info Information about the invoke.
     * @param is_min If true generates code that computes minimum. Otherwise computes maximum.
     * @param is_long If true the value is a Long. Otherwise the value is an Int.
     * @return Returns true if successfully generated
     */
    virtual bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) = 0;
    virtual bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);

    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
                                     bool is_div) = 0;
    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param flags The instruction optimization flags. It can include information
     * if exception check can be elided.
     */
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, int flags) = 0;
    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                     bool is_div) = 0;
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;

    /**
     * @brief Used for generating code that throws ArithmeticException if both registers are zero.
     * @details This is used for generating DivideByZero checks when divisor is held in two
     * separate registers.
     * @param reg The register holding the pair of 32-bit values.
     */
    virtual void GenDivZeroCheckWide(RegStorage reg) = 0;

    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

    /*
     * @brief Handle Machine Specific MIR Extended opcodes.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is not standard extended MIR.
     * @note Base class implementation will abort for unknown opcodes.
     */
    virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);

    /**
     * @brief Lowers the kMirOpSelect MIR into LIR.
     * @param bb The basic block in which the MIR is from.
     * @param mir The MIR whose opcode is kMirOpSelect.
     */
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Generates code to select one of the given constants depending on the given opcode.
     */
    virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                  RegisterClass dest_reg_class) = 0;

    /**
     * @brief Used to generate a memory barrier in an architecture specific way.
     * @details The last generated LIR will be considered for use as barrier. Namely,
     * if the last LIR can be updated in a way where it will serve the semantics of
     * barrier, then it will be used as such. Otherwise, a new LIR will be generated
     * that can keep the semantics.
     * @param barrier_kind The kind of memory barrier to generate.
     * @return whether a new instruction was generated.
     */
    virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;

    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;

    // Create code for switch statements. Will decide between short and long versions below.
    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);

    // Potentially backend-specific versions of switch instructions for shorter switch statements.
    // The default implementation will create a chained compare-and-branch.
    virtual void GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    virtual void GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    // Backend-specific versions of switch instructions for longer switch statements.
    virtual void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;

    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale,
                             bool card_mark) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift, int flags) = 0;

    // Required for target - single operation generators.
  virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
  virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
  virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                              LIR* target) = 0;
  virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
  virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
  virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
  virtual void OpEndIT(LIR* it) = 0;
  virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
  virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
  virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
  virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
  virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

  /**
   * @brief Used to generate an LIR that does a load from mem to reg.
   * @param r_dest The destination physical register.
   * @param r_base The base physical register for memory operand.
   * @param offset The displacement for memory operand.
   * @param move_type Specification on the move desired (size, alignment, register kind).
   * @return Returns the generated move LIR.
   */
  virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                           MoveType move_type) = 0;

  /**
   * @brief Used to generate an LIR that does a store from reg to mem.
   * @param r_base The base physical register for memory operand.
   * @param offset The displacement for memory operand.
   * @param r_src The source physical register.
   * @param move_type Specification on the move desired (size, alignment, register kind).
   * @return Returns the generated move LIR.
   */
  virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                           MoveType move_type) = 0;

  /**
   * @brief Used for generating a conditional register to register operation.
   * @param op The opcode kind.
   * @param cc The condition code that when true will perform the opcode.
   * @param r_dest The destination physical register.
   * @param r_src The source physical register.
   * @return Returns the newly created LIR or null in case of creation failure.
   */
  virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;

  virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
  virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                           RegStorage r_src2) = 0;
  virtual LIR* OpTestSuspend(LIR* target) = 0;
  virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
  virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
  virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;

  // Target-specific cheapness queries for materializing constants.
  virtual bool InexpensiveConstantInt(int32_t value) = 0;
  virtual bool InexpensiveConstantFloat(int32_t value) = 0;
  virtual bool InexpensiveConstantLong(int64_t value) = 0;
  virtual bool InexpensiveConstantDouble(int64_t value) = 0;
  // Opcode-aware variant; the default ignores the opcode and defers to the plain query.
  virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
    UNUSED(opcode);
    return InexpensiveConstantInt(value);
  }

  /**
   * @brief Whether division by the given divisor can be converted to multiply by its reciprocal.
   * @param divisor A constant divisor bits of float type.
   * @return Returns true iff, x/divisor == x*(1.0f/divisor), for every float x.
   */
  bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
    // True, if float value significand bits are 0.
    return ((divisor & 0x7fffff) == 0);
  }

  /**
   * @brief Whether division by the given divisor can be converted to multiply by its reciprocal.
   * @param divisor A constant divisor bits of double type.
   * @return Returns true iff, x/divisor == x*(1.0/divisor), for every double x.
   */
  bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
    // True, if double value significand bits are 0.
    return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
  }

  // May be optimized by targets.
  virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
  virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

  // Temp workaround
  void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);

  virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;

 protected:
  Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

  // Accessor for the compilation unit this generator was constructed with (cu_ below).
  CompilationUnit* GetCompilationUnit() {
    return cu_;
  }
  /*
   * @brief Returns the index of the lowest set bit in 'x'.
   * @param x Value to be examined.
   * @returns The bit number of the lowest bit set in the value.
   */
  int32_t LowestSetBit(uint64_t x);
  /*
   * @brief Is this value a power of two?
   * @param x Value to be examined.
   * @returns 'true' if only 1 bit is set in the value.
   */
  bool IsPowerOfTwo(uint64_t x);
  /*
   * @brief Do these SRs overlap?
   * @param rl_op1 One RegLocation
   * @param rl_op2 The other RegLocation
   * @return 'true' if the VR pairs overlap
   *
   * Check to see if a result pair has a misaligned overlap with an operand pair.  This
   * is not usual for dx to generate, but it is legal (for now).  In a future rev of
   * dex, we'll want to make this case illegal.
   */
  bool PartiallyIntersects(RegLocation rl_op1, RegLocation rl_op2);

  /*
   * @brief Do these SRs intersect?
   * @param rl_op1 One RegLocation
   * @param rl_op2 The other RegLocation
   * @return 'true' if the VR pairs intersect
   *
   * Check to see if a result pair has misaligned overlap or
   * full overlap with an operand pair.
   */
  bool Intersects(RegLocation rl_op1, RegLocation rl_op2);

  /*
   * @brief Force a location (in a register) into a temporary register
   * @param loc location of result
   * @returns updated location
   */
  virtual RegLocation ForceTemp(RegLocation loc);

  /*
   * @brief Force a wide location (in registers) into temporary registers
   * @param loc location of result
   * @returns updated location
   */
  virtual RegLocation ForceTempWide(RegLocation loc);

  virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                  RegLocation rl_dest, RegLocation rl_src);

  void AddSlowPath(LIRSlowPath* slowpath);

  /*
   *
   * @brief Implement Set up instanceof a class.
   * @param needs_access_check 'true' if we must check the access.
   * @param type_known_final 'true' if the type is known to be a final class.
   * @param type_known_abstract 'true' if the type is known to be an abstract class.
   * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
   * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
   * @param type_idx Type index to use if use_declaring_class is 'false'.
   * @param rl_dest Result to be set to 0 or 1.
   * @param rl_src Object to be tested.
   */
  void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                  bool type_known_abstract, bool use_declaring_class,
                                  bool can_assume_type_is_in_dex_cache,
                                  uint32_t type_idx, RegLocation rl_dest,
                                  RegLocation rl_src);
  /*
   * @brief Generate the eh_frame FDE information if possible.
   * @returns pointer to vector containing FDE information, or NULL.
   */
  virtual std::vector<uint8_t>* ReturnFrameDescriptionEntry();

  /**
   * @brief Used to insert marker that can be used to associate MIR with LIR.
   * @details Only inserts marker if verbosity is enabled.
   * @param mir The mir that is currently being generated.
   */
  void GenPrintLabel(MIR* mir);

  /**
   * @brief Used to generate return sequence when there is no frame.
   * @details Assumes that the return registers have already been populated.
   */
  virtual void GenSpecialExitSequence() = 0;

  /**
   * @brief Used to generate code for special methods that are known to be
   * small enough to work in frameless mode.
   * @param bb The basic block of the first MIR.
   * @param mir The first MIR of the special method.
   * @param special Information about the special method.
   * @return Returns whether or not this was handled successfully. Returns false
   * if caller should punt to normal MIR2LIR conversion.
   */
  virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

 protected:
  void ClobberBody(RegisterInfo* p);
  // Record the Dalvik offset of the instruction currently being compiled
  // (see the TODO on current_dalvik_offset_ for why this is a member).
  void SetCurrentDexPc(DexOffset dexpc) {
    current_dalvik_offset_ = dexpc;
  }

  /**
   * @brief Used to lock register if argument at in_position was passed that way.
   * @details Does nothing if the argument is passed via stack.
   * @param in_position The argument number whose register to lock.
   * @param wide Whether the argument is wide.
   */
  void LockArg(int in_position, bool wide = false);

  /**
   * @brief Used to load VR argument to a physical register.
   * @details The load is only done if the argument is not already in physical register.
   * LockArg must have been previously called.
   * @param in_position The argument number to load.
   * @param reg_class The register class to load the argument into.
   * @param wide Whether the argument is 64-bit or not.
   * @return Returns the register (or register pair) for the loaded argument.
   */
  RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);

  /**
   * @brief Used to load a VR argument directly to a specified register location.
   * @param in_position The argument number to place in register.
   * @param rl_dest The register location where to place argument.
   */
  void LoadArgDirect(int in_position, RegLocation rl_dest);

  /**
   * @brief Used to generate LIR for special getter method.
   * @param mir The mir that represents the iget.
   * @param special Information about the special getter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIGet(MIR* mir, const InlineMethod& special);

  /**
   * @brief Used to generate LIR for special setter method.
   * @param mir The mir that represents the iput.
   * @param special Information about the special setter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

  /**
   * @brief Used to generate LIR for special return-args method.
   * @param mir The mir that represents the return of argument.
   * @param special Information about the special return-args method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

  void AddDivZeroCheckSlowPath(LIR* branch);

  // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
  // kArg2 as temp.
  virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

  /**
   * @brief Load Constant into RegLocation
   * @param rl_dest Destination RegLocation
   * @param value Constant value
   */
  virtual void GenConst(RegLocation rl_dest, int value);

  /**
   * Returns true iff wide GPRs are just different views on the same physical register.
   */
  virtual bool WideGPRsAreAliases() const = 0;

  /**
   * Returns true iff wide FPRs are just different views on the same physical register.
   */
  virtual bool WideFPRsAreAliases() const = 0;


  // Per-property modes for CheckRegStorageImpl/CheckRegLocationImpl below:
  // kIgnore* skips the corresponding check entirely.
  enum class WidenessCheck {  // private
    kIgnoreWide,
    kCheckWide,
    kCheckNotWide
  };

  enum class RefCheck {  // private
    kIgnoreRef,
    kCheckRef,
    kCheckNotRef
  };

  enum class FPCheck {  // private
    kIgnoreFP,
    kCheckFP,
    kCheckNotFP
  };

  /**
   * Check whether a reg storage seems well-formed, that is, if a reg storage is valid,
   * that it has the expected form for the flags.
   * A flag value of 0 means ignore. A flag value of -1 means false. A flag value of 1 means true.
   * NOTE(review): the 0/-1/1 wording appears to predate the enum-class parameters above —
   * confirm against the implementation.
   */
  void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp, bool fail,
                           bool report)
      const;

  /**
   * Check whether a reg location seems well-formed, that is, if a reg storage is encoded,
   * that it has the expected size.
   */
  void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;

  // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
  // kReportSizeError.
  void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
  // See CheckRegLocationImpl.
  void CheckRegLocation(RegLocation rl) const;

 public:
  // TODO: add accessors for these.
  LIR* literal_list_;                        // Constants.
  LIR* method_literal_list_;                 // Method literals requiring patching.
  LIR* class_literal_list_;                  // Class literals requiring patching.
  LIR* code_literal_list_;                   // Code literals requiring patching.
  LIR* first_fixup_;                         // Doubly-linked list of LIR nodes requiring fixups.

 protected:
  CompilationUnit* const cu_;
  MIRGraph* const mir_graph_;
  ArenaVector<SwitchTable*> switch_tables_;
  ArenaVector<FillArrayData*> fill_array_data_;
  ArenaVector<RegisterInfo*> tempreg_info_;
  ArenaVector<RegisterInfo*> reginfo_map_;
  ArenaVector<void*> pointer_storage_;
  CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
  CodeOffset data_offset_;            // starting offset of literal pool.
  size_t total_size_;                 // header + code size.
  LIR* block_label_list_;
  PromotionMap* promotion_map_;
  /*
   * TODO: The code generation utilities don't have a built-in
   * mechanism to propagate the original Dalvik opcode address to the
   * associated generated instructions.  For the trace compiler, this wasn't
   * necessary because the interpreter handled all throws and debugging
   * requests.  For now we'll handle this by placing the Dalvik offset
   * in the CompilationUnit struct before codegen for each instruction.
   * The low-level LIR creation utilities will pull it from here.  Rework this.
   */
  DexOffset current_dalvik_offset_;
  size_t estimated_native_code_size_;  // Just an estimate; used to reserve code_buffer_ size.
  std::unique_ptr<RegisterPool> reg_pool_;
  /*
   * Sanity checking for the register temp tracking.  The same ssa
   * name should never be associated with more than one temp register per
   * instruction compilation.
   */
  int live_sreg_;
  CodeBuffer code_buffer_;
  // The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_
  SrcMap src_mapping_table_;
  // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
  std::vector<uint8_t> encoded_mapping_table_;
  ArenaVector<uint32_t> core_vmap_table_;
  ArenaVector<uint32_t> fp_vmap_table_;
  std::vector<uint8_t> native_gc_map_;
  ArenaVector<LinkerPatch> patches_;
  int num_core_spills_;
  int num_fp_spills_;
  int frame_size_;
  unsigned int core_spill_mask_;
  unsigned int fp_spill_mask_;
  LIR* first_lir_insn_;
  LIR* last_lir_insn_;

  ArenaVector<LIRSlowPath*> slow_paths_;

  // The memory reference type for new LIRs.
  // NOTE: Passing this as an explicit parameter by all functions that directly or indirectly
  // invoke RawLIR() would clutter the code and reduce the readability.
  ResourceMask::ResourceBit mem_ref_type_;

  // Each resource mask now takes 16-bytes, so having both use/def masks directly in a LIR
  // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
  // (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
  // to deduplicate the masks.
  ResourceMaskCache mask_cache_;

 private:
  static bool SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type);
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_