code_generator_arm.cc revision c66671076b12a0ee8b9d1ae782732cc91beacb73
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {

namespace arm {

// Returns true if `location` is an aligned, adjacent register pair:
// the low register number is even and the high register is low + 1.
// This is the layout the backend relies on for both core register pairs
// (used by ldrd/strd) and FPU pairs (two S registers forming a D register).
static bool ExpectedPairLayout(Location location) {
  // We expected this for both core and fpu register pairs.
  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
}

// The ArtMethod* for the current method is always stored at SP + 0.
static constexpr int kCurrentMethodStackOffset = 0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// NOTE: PC appears in the callee-save list to mimic Quick's frame layout;
// on entry LR is pushed in its slot instead (see GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers.
// Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Assembler shorthand used inside slow paths, where `codegen` is the
// CodeGenerator* parameter of EmitNativeCode.
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
// Byte offset of a quick entrypoint in the Thread object, as an int32.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()

// Slow path that calls the runtime to throw a NullPointerException.
// The runtime call never returns, so no live registers are saved and
// there is no branch back to the fast path.
class NullCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};

// Slow path that calls the runtime to throw an ArithmeticException for
// division by zero. Like the null check path, it never returns.
class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};

// Slow path that calls pTestSuspend to let the thread yield, then resumes
// execution: either back at the suspend check (via `return_label_`) or at
// the loop successor block when one is supplied.
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // This call returns, so live registers must survive it.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ b(GetReturnLabel());
    } else {
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};

// Slow path that throws ArrayIndexOutOfBoundsException. The index and length
// are moved into the runtime calling convention registers before the call;
// no registers are saved since the call never returns.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
                         Location index_location,
                         Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        length_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};

// Slow path that resolves a type (pInitializeType) or resolves and
// initializes it (pInitializeStaticStorage), then moves the result from R0
// to the instruction's output location and rejoins the fast path.
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      // The output must not be clobbered by RestoreLiveRegisters below.
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};

// Slow path that resolves a string via pResolveString and moves the result
// from R0 to the instruction's output location before rejoining.
class LoadStringSlowPathARM : public SlowPathCodeARM {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};

// Shared slow path for HInstanceOf (calls pInstanceofNonTrivial and writes
// the boolean result to the output) and HCheckCast (calls pCheckCast, which
// throws on failure).
class TypeCheckSlowPathARM : public SlowPathCodeARM {
 public:
  TypeCheckSlowPathARM(HInstruction* instruction,
                       Location class_to_check,
                       Location object_class,
                       uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // CheckCast has no output; InstanceOf's output must survive the restore.
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        object_class_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
    }

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};

// Slow path that transfers control to the interpreter via pDeoptimize.
// The runtime call does not return to compiled code, so registers are
// saved (for the deopt machinery) but never restored here.
class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};

#undef __

// NOTE(review): the second #undef below is redundant (the macro was just
// undefined above) but harmless; kept byte-identical.
#undef __
// From here on, `__` targets the code generator's own assembler
// (member function context, no `codegen` parameter).
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->

// Maps an HIR condition to the ARM condition code used when branching on
// the condition being true.
inline Condition ARMCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return EQ;
    case kCondNE: return NE;
    case kCondLT: return LT;
    case kCondLE: return LE;
    case kCondGT: return GT;
    case kCondGE: return GE;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return EQ;        // Unreachable.
}

// Maps an HIR condition to the ARM condition code of its logical negation
// (used when branching on the condition being false).
inline Condition ARMOppositeCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return NE;
    case kCondNE: return EQ;
    case kCondLT: return GE;
    case kCondLE: return GT;
    case kCondGT: return LE;
    case kCondGE: return LT;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return EQ;        // Unreachable.
}

// Pretty-prints a core register for debugging output.
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << ArmManagedRegister::FromCoreRegister(Register(reg));
}

// Pretty-prints a floating-point (S) register for debugging output.
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << ArmManagedRegister::FromSRegister(SRegister(reg));
}

// Spills core register `reg_id` to [SP + stack_index]; returns the number
// of bytes used (one ARM word).
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}

// Reloads core register `reg_id` from [SP + stack_index].
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}

// Spills FPU register S`reg_id` to [SP + stack_index].
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}

// Reloads FPU register S`reg_id` from [SP + stack_index].
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}

CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(true),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}

// Picks a free register (or register pair) of the right kind for `type`
// during baseline register allocation, marking it and any overlapping
// pairs as blocked.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need an even/odd adjacent S-register pair (a D register).
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}

// Marks registers the allocator must never hand out: SP/LR/PC, the thread
// register, the scratch register IP, and (for baseline) all callee-saves
// except the one reserved baseline register.
void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
  // Don't allocate the dalvik style register pair passing.
  blocked_register_pairs_[R1_R2] = true;

  // Stack register, LR and PC are always reserved.
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[LR] = true;
  blocked_core_registers_[PC] = true;

  // Reserve thread register.
  blocked_core_registers_[TR] = true;

  // Reserve temp register.
  blocked_core_registers_[IP] = true;

  if (is_baseline) {
    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
    }

    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
    }
  }

  UpdateBlockedPairRegisters();
}

// Re-derives the blocked state of every register pair from the blocked
// state of its two component core registers.
void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
    ArmManagedRegister current =
        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
    if (blocked_core_registers_[current.AsRegisterPairLow()]
        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
      blocked_register_pairs_[i] = true;
    }
  }
}

InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

// Index of the lowest set bit in `mask`; mask must be non-zero.
static uint32_t LeastSignificantBit(uint32_t mask) {
  // ffs starts at 1.
  return ffs(mask) - 1;
}

// Computes which core and FPU registers the prologue/epilogue must
// save/restore, based on what was allocated plus the frame's requirements.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}

// DWARF register number for a core register (for CFI emission).
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

// DWARF register number for an S register (for CFI emission).
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}

// Emits the method prologue: stack overflow probe, callee-save pushes
// (with matching CFI records), frame allocation, and storing the
// ArtMethod* (R0) at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit overflow check: touch the lowest reserved stack page and
    // fault there (the signal handler maps the fault to StackOverflowError).
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  // Store the current method (passed in R0) at the bottom of the frame.
  __ StoreToOffset(kStoreWord, R0, SP, 0);
}

// Emits the method epilogue, mirroring GenerateFrameEntry. Popping
// `core_spill_mask_` (which contains PC) restores the saved LR slot
// directly into PC, performing the return.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

// Returns the stack slot (single or double) backing a dex local.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

// Assigns the location (register, register pair, FPU register, or stack
// slot) for the next method argument of `type`, advancing the visitor's
// register and stack cursors according to the managed ARM convention.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // Once a double has been claimed, singles may not back-fill below it.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at the next even S register at or above the float cursor.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}

// Returns where a callee of the given return `type` leaves its result:
// R0 / R0_R1 for integral values, S0 / S0_S1 for FP values.
Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      return Location::RegisterLocation(R0);
    }

    case Primitive::kPrimFloat: {
      return Location::FpuRegisterLocation(S0);
    }

    case Primitive::kPrimLong: {
      return Location::RegisterPairLocation(R0, R1);
    }

    case Primitive::kPrimDouble: {
      return Location::FpuRegisterPairLocation(S0, S1);
    }

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

// Moves a 32-bit value between any two locations (core register, S
// register, or stack slot), using IP as scratch for stack-to-stack moves.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}

// Moves a 64-bit value between locations (core pair, FPU pair, or double
// stack slot). Register-pair-to-register-pair goes through the parallel
// move resolver because the halves may overlap.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1_R2 is not an strd-compatible pair; store the words separately.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}

// Moves the value produced by `instruction` into `location`, handling
// constants, dex locals, temporaries, and ordinary instruction outputs.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  if (locations != nullptr && locations->Out().IsConstant()) {
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}

// Calls a Quick runtime entry point: loads the function pointer from the
// thread register (TR) at `entry_point_offset`, branches to it via LR, and
// records PC info so the runtime can map the return address back to `dex_pc`.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only these instruction kinds (or non-leaf methods, or locations that
  // declare they can call) may legitimately reach the runtime.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}

// An unconditional branch needs no register locations.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Back edge of a loop carrying a suspend check: emit the suspend check
    // (targeting `successor`) instead of a plain branch.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // In the entry block, a preceding suspend check is emitted here, before
  // the branch out of the block.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // Elide the branch when the successor is the next block in emission order.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}

// Emits the compare-and-branch for `instruction`'s condition input (input 0).
// Branches to `true_target` (or `always_true_target` for a statically-true
// condition) when the condition holds, and to `false_target` otherwise.
// A null target means "fall through" — no branch is emitted for it.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0.
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        // Use an encodable immediate operand if possible; otherwise
        // materialize the constant in the scratch register IP.
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}

void LocationsBuilderARM::VisitIf(HIf* if_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
  HInstruction* cond = if_instr->InputAt(0);
  // Only a materialized condition needs an input register; otherwise the
  // condition's own inputs are used directly by the branch.
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  // Null out targets that immediately follow in emission order so no
  // redundant branch is generated for them.
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  // Deoptimization branches to a slow path when its condition holds; both
  // the "true" and "always true" targets are the slow-path entry.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathARM(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderARM::VisitCondition(HCondition* comp) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)));
  // An output register is only needed when the condition is materialized.
  if (comp->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
  // Nothing to emit when the condition is consumed directly by a branch.
  if (!comp->NeedsMaterialization()) return;
  LocationSummary* locations = comp->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    // Prefer an encodable immediate operand; otherwise materialize the
    // constant in the scratch register IP.
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // Materialize the boolean with an IT/ELSE block: out = cond ? 1 : 0.
  __ it(ARMCondition(comp->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(comp->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(comp->GetCondition()));
}

// All concrete comparison visitors below delegate to VisitCondition.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // Locals may only appear in the entry block.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  // Pin the value (input 1) to the local's stack slot; 32-bit types use a
  // single slot, 64-bit types a double slot.
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}

void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

// Constants carry no code of their own; their value is emitted at use sites.
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  // The returned value must already sit in the calling convention's return
  // location, so no move is needed at the return itself.
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}

void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  // Recognized intrinsics get dedicated locations; fall back to the generic
  // invoke handling otherwise.
  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

// Loads the current ArtMethod* (spilled at SP + kCurrentMethodStackOffset)
// into `reg`.
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}

// Emits intrinsic code for `invoke` if its locations were marked intrinsified;
// returns whether intrinsic code was generated.
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
  if (invoke->GetLocations()->Intrinsified()) {
    IntrinsicCodeGeneratorARM intrinsic(codegen);
    intrinsic.Dispatch(invoke);
    return true;
  }
  return false;
}

void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();

  codegen_->GenerateStaticOrDirectCall(invoke, temp);
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

// Common location setup for all invoke flavors: arguments and return value
// follow the Dex calling convention; R0 is reserved as a temp.
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(Location::RegisterLocation(R0));

  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  locations->SetOut(calling_convention_visitor.GetReturnLocation(invoke->GetType()));
}

void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  // Offset of the target method's slot in the receiver class's embedded vtable.
  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
          invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP,
                      receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the implicit null check on the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}

void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  // Offset of the IMT slot for this interface method in the receiver's class.
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the implicit null check on the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // Output overlaps the input: the generated code writes out.lo before
      // reading in.hi (see VisitNeg below).
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set. We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);

  // The float-to-long and double-to-long type conversions rely on a
  // call to the runtime.
  LocationSummary::CallKind call_kind =
      ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
       && result_type == Primitive::kPrimLong)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  // The Java language does not allow treating boolean as an integral type but
  // our bit representation makes it safe.

  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-int' instruction.
          // The FPU temp holds the truncated value before the core move.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-int' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-long' instruction.
          // Handled by a runtime call; input/output follow the runtime
          // calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterLocation(
              calling_convention.GetFpuRegisterAt(0)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-long' instruction.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterPairLocation(
              calling_convention.GetFpuRegisterAt(0),
              calling_convention.GetFpuRegisterAt(1)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-float' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          // Two core temps hold the 2^32 double constant; two FPU temps hold
          // the intermediate double results (see VisitTypeConversion below).
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}

void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
1705 __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16); 1706 break; 1707 1708 default: 1709 LOG(FATAL) << "Unexpected type conversion from " << input_type 1710 << " to " << result_type; 1711 } 1712 break; 1713 1714 case Primitive::kPrimInt: 1715 switch (input_type) { 1716 case Primitive::kPrimLong: 1717 // Processing a Dex `long-to-int' instruction. 1718 DCHECK(out.IsRegister()); 1719 if (in.IsRegisterPair()) { 1720 __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>()); 1721 } else if (in.IsDoubleStackSlot()) { 1722 __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex()); 1723 } else { 1724 DCHECK(in.IsConstant()); 1725 DCHECK(in.GetConstant()->IsLongConstant()); 1726 int64_t value = in.GetConstant()->AsLongConstant()->GetValue(); 1727 __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value)); 1728 } 1729 break; 1730 1731 case Primitive::kPrimFloat: { 1732 // Processing a Dex `float-to-int' instruction. 1733 SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>(); 1734 __ vmovs(temp, in.AsFpuRegister<SRegister>()); 1735 __ vcvtis(temp, temp); 1736 __ vmovrs(out.AsRegister<Register>(), temp); 1737 break; 1738 } 1739 1740 case Primitive::kPrimDouble: { 1741 // Processing a Dex `double-to-int' instruction. 1742 SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>(); 1743 DRegister temp_d = FromLowSToD(temp_s); 1744 __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>())); 1745 __ vcvtid(temp_s, temp_d); 1746 __ vmovrs(out.AsRegister<Register>(), temp_s); 1747 break; 1748 } 1749 1750 default: 1751 LOG(FATAL) << "Unexpected type conversion from " << input_type 1752 << " to " << result_type; 1753 } 1754 break; 1755 1756 case Primitive::kPrimLong: 1757 switch (input_type) { 1758 case Primitive::kPrimBoolean: 1759 // Boolean input is a result of code transformations. 
1760 case Primitive::kPrimByte: 1761 case Primitive::kPrimShort: 1762 case Primitive::kPrimInt: 1763 case Primitive::kPrimChar: 1764 // Processing a Dex `int-to-long' instruction. 1765 DCHECK(out.IsRegisterPair()); 1766 DCHECK(in.IsRegister()); 1767 __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>()); 1768 // Sign extension. 1769 __ Asr(out.AsRegisterPairHigh<Register>(), 1770 out.AsRegisterPairLow<Register>(), 1771 31); 1772 break; 1773 1774 case Primitive::kPrimFloat: 1775 // Processing a Dex `float-to-long' instruction. 1776 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l), 1777 conversion, 1778 conversion->GetDexPc(), 1779 nullptr); 1780 break; 1781 1782 case Primitive::kPrimDouble: 1783 // Processing a Dex `double-to-long' instruction. 1784 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l), 1785 conversion, 1786 conversion->GetDexPc(), 1787 nullptr); 1788 break; 1789 1790 default: 1791 LOG(FATAL) << "Unexpected type conversion from " << input_type 1792 << " to " << result_type; 1793 } 1794 break; 1795 1796 case Primitive::kPrimChar: 1797 switch (input_type) { 1798 case Primitive::kPrimBoolean: 1799 // Boolean input is a result of code transformations. 1800 case Primitive::kPrimByte: 1801 case Primitive::kPrimShort: 1802 case Primitive::kPrimInt: 1803 // Processing a Dex `int-to-char' instruction. 1804 __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16); 1805 break; 1806 1807 default: 1808 LOG(FATAL) << "Unexpected type conversion from " << input_type 1809 << " to " << result_type; 1810 } 1811 break; 1812 1813 case Primitive::kPrimFloat: 1814 switch (input_type) { 1815 case Primitive::kPrimBoolean: 1816 // Boolean input is a result of code transformations. 1817 case Primitive::kPrimByte: 1818 case Primitive::kPrimShort: 1819 case Primitive::kPrimInt: 1820 case Primitive::kPrimChar: { 1821 // Processing a Dex `int-to-float' instruction. 
1822 __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>()); 1823 __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>()); 1824 break; 1825 } 1826 1827 case Primitive::kPrimLong: { 1828 // Processing a Dex `long-to-float' instruction. 1829 Register low = in.AsRegisterPairLow<Register>(); 1830 Register high = in.AsRegisterPairHigh<Register>(); 1831 SRegister output = out.AsFpuRegister<SRegister>(); 1832 Register constant_low = locations->GetTemp(0).AsRegister<Register>(); 1833 Register constant_high = locations->GetTemp(1).AsRegister<Register>(); 1834 SRegister temp1_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>(); 1835 DRegister temp1_d = FromLowSToD(temp1_s); 1836 SRegister temp2_s = locations->GetTemp(3).AsFpuRegisterPairLow<SRegister>(); 1837 DRegister temp2_d = FromLowSToD(temp2_s); 1838 1839 // Operations use doubles for precision reasons (each 32-bit 1840 // half of a long fits in the 53-bit mantissa of a double, 1841 // but not in the 24-bit mantissa of a float). This is 1842 // especially important for the low bits. The result is 1843 // eventually converted to float. 1844 1845 // temp1_d = int-to-double(high) 1846 __ vmovsr(temp1_s, high); 1847 __ vcvtdi(temp1_d, temp1_s); 1848 // Using vmovd to load the `k2Pow32EncodingForDouble` constant 1849 // as an immediate value into `temp2_d` does not work, as 1850 // this instruction only transfers 8 significant bits of its 1851 // immediate operand. Instead, use two 32-bit core 1852 // registers to load `k2Pow32EncodingForDouble` into 1853 // `temp2_d`. 
1854 __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble)); 1855 __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble)); 1856 __ vmovdrr(temp2_d, constant_low, constant_high); 1857 // temp1_d = temp1_d * 2^32 1858 __ vmuld(temp1_d, temp1_d, temp2_d); 1859 // temp2_d = unsigned-to-double(low) 1860 __ vmovsr(temp2_s, low); 1861 __ vcvtdu(temp2_d, temp2_s); 1862 // temp1_d = temp1_d + temp2_d 1863 __ vaddd(temp1_d, temp1_d, temp2_d); 1864 // output = double-to-float(temp1_d); 1865 __ vcvtsd(output, temp1_d); 1866 break; 1867 } 1868 1869 case Primitive::kPrimDouble: 1870 // Processing a Dex `double-to-float' instruction. 1871 __ vcvtsd(out.AsFpuRegister<SRegister>(), 1872 FromLowSToD(in.AsFpuRegisterPairLow<SRegister>())); 1873 break; 1874 1875 default: 1876 LOG(FATAL) << "Unexpected type conversion from " << input_type 1877 << " to " << result_type; 1878 }; 1879 break; 1880 1881 case Primitive::kPrimDouble: 1882 switch (input_type) { 1883 case Primitive::kPrimBoolean: 1884 // Boolean input is a result of code transformations. 1885 case Primitive::kPrimByte: 1886 case Primitive::kPrimShort: 1887 case Primitive::kPrimInt: 1888 case Primitive::kPrimChar: { 1889 // Processing a Dex `int-to-double' instruction. 1890 __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>()); 1891 __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), 1892 out.AsFpuRegisterPairLow<SRegister>()); 1893 break; 1894 } 1895 1896 case Primitive::kPrimLong: { 1897 // Processing a Dex `long-to-double' instruction. 
1898 Register low = in.AsRegisterPairLow<Register>(); 1899 Register high = in.AsRegisterPairHigh<Register>(); 1900 SRegister out_s = out.AsFpuRegisterPairLow<SRegister>(); 1901 DRegister out_d = FromLowSToD(out_s); 1902 Register constant_low = locations->GetTemp(0).AsRegister<Register>(); 1903 Register constant_high = locations->GetTemp(1).AsRegister<Register>(); 1904 SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>(); 1905 DRegister temp_d = FromLowSToD(temp_s); 1906 1907 // out_d = int-to-double(high) 1908 __ vmovsr(out_s, high); 1909 __ vcvtdi(out_d, out_s); 1910 // Using vmovd to load the `k2Pow32EncodingForDouble` constant 1911 // as an immediate value into `temp_d` does not work, as 1912 // this instruction only transfers 8 significant bits of its 1913 // immediate operand. Instead, use two 32-bit core 1914 // registers to load `k2Pow32EncodingForDouble` into `temp_d`. 1915 __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble)); 1916 __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble)); 1917 __ vmovdrr(temp_d, constant_low, constant_high); 1918 // out_d = out_d * 2^32 1919 __ vmuld(out_d, out_d, temp_d); 1920 // temp_d = unsigned-to-double(low) 1921 __ vmovsr(temp_s, low); 1922 __ vcvtdu(temp_d, temp_s); 1923 // out_d = out_d + temp_d 1924 __ vaddd(out_d, out_d, temp_d); 1925 break; 1926 } 1927 1928 case Primitive::kPrimFloat: 1929 // Processing a Dex `float-to-double' instruction. 
1930 __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), 1931 in.AsFpuRegister<SRegister>()); 1932 break; 1933 1934 default: 1935 LOG(FATAL) << "Unexpected type conversion from " << input_type 1936 << " to " << result_type; 1937 }; 1938 break; 1939 1940 default: 1941 LOG(FATAL) << "Unexpected type conversion from " << input_type 1942 << " to " << result_type; 1943 } 1944} 1945 1946void LocationsBuilderARM::VisitAdd(HAdd* add) { 1947 LocationSummary* locations = 1948 new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); 1949 switch (add->GetResultType()) { 1950 case Primitive::kPrimInt: { 1951 locations->SetInAt(0, Location::RequiresRegister()); 1952 locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); 1953 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 1954 break; 1955 } 1956 1957 case Primitive::kPrimLong: { 1958 locations->SetInAt(0, Location::RequiresRegister()); 1959 locations->SetInAt(1, Location::RequiresRegister()); 1960 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 1961 break; 1962 } 1963 1964 case Primitive::kPrimFloat: 1965 case Primitive::kPrimDouble: { 1966 locations->SetInAt(0, Location::RequiresFpuRegister()); 1967 locations->SetInAt(1, Location::RequiresFpuRegister()); 1968 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 1969 break; 1970 } 1971 1972 default: 1973 LOG(FATAL) << "Unexpected add type " << add->GetResultType(); 1974 } 1975} 1976 1977void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) { 1978 LocationSummary* locations = add->GetLocations(); 1979 Location out = locations->Out(); 1980 Location first = locations->InAt(0); 1981 Location second = locations->InAt(1); 1982 switch (add->GetResultType()) { 1983 case Primitive::kPrimInt: 1984 if (second.IsRegister()) { 1985 __ add(out.AsRegister<Register>(), 1986 first.AsRegister<Register>(), 1987 ShifterOperand(second.AsRegister<Register>())); 1988 } else 
{ 1989 __ AddConstant(out.AsRegister<Register>(), 1990 first.AsRegister<Register>(), 1991 second.GetConstant()->AsIntConstant()->GetValue()); 1992 } 1993 break; 1994 1995 case Primitive::kPrimLong: { 1996 DCHECK(second.IsRegisterPair()); 1997 __ adds(out.AsRegisterPairLow<Register>(), 1998 first.AsRegisterPairLow<Register>(), 1999 ShifterOperand(second.AsRegisterPairLow<Register>())); 2000 __ adc(out.AsRegisterPairHigh<Register>(), 2001 first.AsRegisterPairHigh<Register>(), 2002 ShifterOperand(second.AsRegisterPairHigh<Register>())); 2003 break; 2004 } 2005 2006 case Primitive::kPrimFloat: 2007 __ vadds(out.AsFpuRegister<SRegister>(), 2008 first.AsFpuRegister<SRegister>(), 2009 second.AsFpuRegister<SRegister>()); 2010 break; 2011 2012 case Primitive::kPrimDouble: 2013 __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), 2014 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()), 2015 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>())); 2016 break; 2017 2018 default: 2019 LOG(FATAL) << "Unexpected add type " << add->GetResultType(); 2020 } 2021} 2022 2023void LocationsBuilderARM::VisitSub(HSub* sub) { 2024 LocationSummary* locations = 2025 new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall); 2026 switch (sub->GetResultType()) { 2027 case Primitive::kPrimInt: { 2028 locations->SetInAt(0, Location::RequiresRegister()); 2029 locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1))); 2030 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2031 break; 2032 } 2033 2034 case Primitive::kPrimLong: { 2035 locations->SetInAt(0, Location::RequiresRegister()); 2036 locations->SetInAt(1, Location::RequiresRegister()); 2037 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2038 break; 2039 } 2040 case Primitive::kPrimFloat: 2041 case Primitive::kPrimDouble: { 2042 locations->SetInAt(0, Location::RequiresFpuRegister()); 2043 locations->SetInAt(1, Location::RequiresFpuRegister()); 2044 
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 2045 break; 2046 } 2047 default: 2048 LOG(FATAL) << "Unexpected sub type " << sub->GetResultType(); 2049 } 2050} 2051 2052void InstructionCodeGeneratorARM::VisitSub(HSub* sub) { 2053 LocationSummary* locations = sub->GetLocations(); 2054 Location out = locations->Out(); 2055 Location first = locations->InAt(0); 2056 Location second = locations->InAt(1); 2057 switch (sub->GetResultType()) { 2058 case Primitive::kPrimInt: { 2059 if (second.IsRegister()) { 2060 __ sub(out.AsRegister<Register>(), 2061 first.AsRegister<Register>(), 2062 ShifterOperand(second.AsRegister<Register>())); 2063 } else { 2064 __ AddConstant(out.AsRegister<Register>(), 2065 first.AsRegister<Register>(), 2066 -second.GetConstant()->AsIntConstant()->GetValue()); 2067 } 2068 break; 2069 } 2070 2071 case Primitive::kPrimLong: { 2072 DCHECK(second.IsRegisterPair()); 2073 __ subs(out.AsRegisterPairLow<Register>(), 2074 first.AsRegisterPairLow<Register>(), 2075 ShifterOperand(second.AsRegisterPairLow<Register>())); 2076 __ sbc(out.AsRegisterPairHigh<Register>(), 2077 first.AsRegisterPairHigh<Register>(), 2078 ShifterOperand(second.AsRegisterPairHigh<Register>())); 2079 break; 2080 } 2081 2082 case Primitive::kPrimFloat: { 2083 __ vsubs(out.AsFpuRegister<SRegister>(), 2084 first.AsFpuRegister<SRegister>(), 2085 second.AsFpuRegister<SRegister>()); 2086 break; 2087 } 2088 2089 case Primitive::kPrimDouble: { 2090 __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), 2091 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()), 2092 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>())); 2093 break; 2094 } 2095 2096 2097 default: 2098 LOG(FATAL) << "Unexpected sub type " << sub->GetResultType(); 2099 } 2100} 2101 2102void LocationsBuilderARM::VisitMul(HMul* mul) { 2103 LocationSummary* locations = 2104 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); 2105 switch (mul->GetResultType()) { 2106 case 
Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

// Emits the multiplication. 64-bit multiply is synthesized out of 32-bit
// mul/mla/umull (see the formula in the kPrimLong case); float and double
// map directly onto vmuls/vmuld.
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

// Fast path for int div/rem by a +/-1 constant: the remainder is always 0;
// the quotient is the dividend itself (imm == 1) or its negation (imm == -1).
void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  DCHECK(imm == 1 || imm == -1);

  if (instruction->IsRem()) {
    __ LoadImmediate(out, 0);
  } else {
    if (imm == 1) {
      __ Mov(out, dividend);
    } else {
      __ rsb(out, dividend, ShifterOperand(0));
    }
  }
}

// Fast path for int div/rem by a power-of-two constant. Adds a rounding
// bias of (abs(imm) - 1) for negative dividends so the shift rounds towards
// zero, then shifts (div) or masks and subtracts the bias (rem).
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  // NOTE(review): std::abs() is undefined behavior for imm == INT32_MIN;
  // confirm that case cannot reach this path.
  int32_t abs_imm = std::abs(imm);
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  // temp = (dividend < 0) ? abs_imm - 1 : 0  -- the rounding bias.
  if (ctz_imm == 1) {
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}

// Int div/rem by an arbitrary constant (not 0, +/-1 or a power of two),
// using magic-number multiplication; the constants come from
// CalculateMagicAndShiftForDivRem.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  __ LoadImmediate(temp1, magic);
  // temp1 <- high 32 bits of dividend * magic.
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the estimate for the sign combination of imm and magic.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // quotient += sign bit, rounding towards zero.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // rem = dividend - quotient * imm.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}

// Dispatches int div/rem by a constant divisor to the specialized
// generators above.
void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  if (imm == 0) {
    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
  } else if (imm == 1 || imm == -1) {
    DivRemOneOrMinusOne(instruction);
  // NOTE(review): std::abs(imm) is undefined for imm == INT32_MIN;
  // verify that case is either impossible here or route it to the
  // generic path explicitly.
  } else if (IsPowerOfTwo(std::abs(imm))) {
    DivRemByPowerOfTwo(instruction);
  } else {
    DCHECK(imm <= -2 || imm >= 2);
    GenerateDivRemWithAnyConstant(instruction);
  }
}

// Register constraints for HDiv. Long division and (without a hardware
// divider) non-constant int division are runtime calls.
void LocationsBuilderARM::VisitDiv(HDiv* div) {
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if (div->GetResultType() == Primitive::kPrimLong) {
    // pLdiv runtime call.
    call_kind = LocationSummary::kCall;
  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
    // sdiv will be replaced by other instruction sequence.
  } else if (div->GetResultType() == Primitive::kPrimInt &&
             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
    // pIdivmod runtime call.
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (div->InputAt(1)->IsConstant()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
        // NOTE(review): std::abs() is undefined for INT32_MIN -- confirm
        // unreachable (see GenerateDivRemConstantIntegral).
        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
        if (abs_imm <= 1) {
          // No temp register required.
        } else {
          // One temp for the power-of-two path, two for the magic-number path.
          locations->AddTemp(Location::RequiresRegister());
          if (!IsPowerOfTwo(abs_imm)) {
            locations->AddTemp(Location::RequiresRegister());
          }
        }
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RequiresRegister());
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      } else {
        InvokeRuntimeCallingConvention calling_convention;
        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
        // we only need the former.
        locations->SetOut(Location::RegisterLocation(R0));
      }
      break;
    }
    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
      locations->SetInAt(1, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
      locations->SetOut(Location::RegisterPairLocation(R0, R1));
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

// Emits the division: constant int divisors go through the specialized
// generators, other int division uses sdiv or the pIdivmod runtime call,
// long uses pLdiv, FP uses vdivs/vdivd.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

// Register constraints for HRem. Only constant-int and hardware-sdiv int
// remainders are computed inline; everything else is a runtime call.
void LocationsBuilderARM::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();

  // Most remainders are implemented in the runtime.
  LocationSummary::CallKind call_kind = LocationSummary::kCall;
  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
    // sdiv will be replaced by other instruction sequence.
    call_kind = LocationSummary::kNoCall;
  } else if ((rem->GetResultType() == Primitive::kPrimInt)
             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
    // Have hardware divide instruction for int, do it with three instructions.
2444 call_kind = LocationSummary::kNoCall; 2445 } 2446 2447 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); 2448 2449 switch (type) { 2450 case Primitive::kPrimInt: { 2451 if (rem->InputAt(1)->IsConstant()) { 2452 locations->SetInAt(0, Location::RequiresRegister()); 2453 locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); 2454 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2455 int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue()); 2456 if (abs_imm <= 1) { 2457 // No temp register required. 2458 } else { 2459 locations->AddTemp(Location::RequiresRegister()); 2460 if (!IsPowerOfTwo(abs_imm)) { 2461 locations->AddTemp(Location::RequiresRegister()); 2462 } 2463 } 2464 } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { 2465 locations->SetInAt(0, Location::RequiresRegister()); 2466 locations->SetInAt(1, Location::RequiresRegister()); 2467 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2468 locations->AddTemp(Location::RequiresRegister()); 2469 } else { 2470 InvokeRuntimeCallingConvention calling_convention; 2471 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2472 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 2473 // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but 2474 // we only need the latter. 
2475 locations->SetOut(Location::RegisterLocation(R1)); 2476 } 2477 break; 2478 } 2479 case Primitive::kPrimLong: { 2480 InvokeRuntimeCallingConvention calling_convention; 2481 locations->SetInAt(0, Location::RegisterPairLocation( 2482 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); 2483 locations->SetInAt(1, Location::RegisterPairLocation( 2484 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); 2485 // The runtime helper puts the output in R2,R3. 2486 locations->SetOut(Location::RegisterPairLocation(R2, R3)); 2487 break; 2488 } 2489 case Primitive::kPrimFloat: { 2490 InvokeRuntimeCallingConvention calling_convention; 2491 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); 2492 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); 2493 locations->SetOut(Location::FpuRegisterLocation(S0)); 2494 break; 2495 } 2496 2497 case Primitive::kPrimDouble: { 2498 InvokeRuntimeCallingConvention calling_convention; 2499 locations->SetInAt(0, Location::FpuRegisterPairLocation( 2500 calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1))); 2501 locations->SetInAt(1, Location::FpuRegisterPairLocation( 2502 calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3))); 2503 locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1)); 2504 break; 2505 } 2506 2507 default: 2508 LOG(FATAL) << "Unexpected rem type " << type; 2509 } 2510} 2511 2512void InstructionCodeGeneratorARM::VisitRem(HRem* rem) { 2513 LocationSummary* locations = rem->GetLocations(); 2514 Location out = locations->Out(); 2515 Location first = locations->InAt(0); 2516 Location second = locations->InAt(1); 2517 2518 Primitive::Type type = rem->GetResultType(); 2519 switch (type) { 2520 case Primitive::kPrimInt: { 2521 if (second.IsConstant()) { 2522 GenerateDivRemConstantIntegral(rem); 2523 } else if 
(codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { 2524 Register reg1 = first.AsRegister<Register>(); 2525 Register reg2 = second.AsRegister<Register>(); 2526 Register temp = locations->GetTemp(0).AsRegister<Register>(); 2527 2528 // temp = reg1 / reg2 (integer division) 2529 // temp = temp * reg2 2530 // dest = reg1 - temp 2531 __ sdiv(temp, reg1, reg2); 2532 __ mul(temp, temp, reg2); 2533 __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp)); 2534 } else { 2535 InvokeRuntimeCallingConvention calling_convention; 2536 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>()); 2537 DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>()); 2538 DCHECK_EQ(R1, out.AsRegister<Register>()); 2539 2540 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr); 2541 } 2542 break; 2543 } 2544 2545 case Primitive::kPrimLong: { 2546 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr); 2547 break; 2548 } 2549 2550 case Primitive::kPrimFloat: { 2551 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr); 2552 break; 2553 } 2554 2555 case Primitive::kPrimDouble: { 2556 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr); 2557 break; 2558 } 2559 2560 default: 2561 LOG(FATAL) << "Unexpected rem type " << type; 2562 } 2563} 2564 2565void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) { 2566 LocationSummary* locations = 2567 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2568 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); 2569 if (instruction->HasUses()) { 2570 locations->SetOut(Location::SameAsFirstInput()); 2571 } 2572} 2573 2574void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) { 2575 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction); 2576 
codegen_->AddSlowPath(slow_path); 2577 2578 LocationSummary* locations = instruction->GetLocations(); 2579 Location value = locations->InAt(0); 2580 2581 switch (instruction->GetType()) { 2582 case Primitive::kPrimInt: { 2583 if (value.IsRegister()) { 2584 __ cmp(value.AsRegister<Register>(), ShifterOperand(0)); 2585 __ b(slow_path->GetEntryLabel(), EQ); 2586 } else { 2587 DCHECK(value.IsConstant()) << value; 2588 if (value.GetConstant()->AsIntConstant()->GetValue() == 0) { 2589 __ b(slow_path->GetEntryLabel()); 2590 } 2591 } 2592 break; 2593 } 2594 case Primitive::kPrimLong: { 2595 if (value.IsRegisterPair()) { 2596 __ orrs(IP, 2597 value.AsRegisterPairLow<Register>(), 2598 ShifterOperand(value.AsRegisterPairHigh<Register>())); 2599 __ b(slow_path->GetEntryLabel(), EQ); 2600 } else { 2601 DCHECK(value.IsConstant()) << value; 2602 if (value.GetConstant()->AsLongConstant()->GetValue() == 0) { 2603 __ b(slow_path->GetEntryLabel()); 2604 } 2605 } 2606 break; 2607 default: 2608 LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType(); 2609 } 2610 } 2611} 2612 2613void LocationsBuilderARM::HandleShift(HBinaryOperation* op) { 2614 DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); 2615 2616 LocationSummary* locations = 2617 new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); 2618 2619 switch (op->GetResultType()) { 2620 case Primitive::kPrimInt: { 2621 locations->SetInAt(0, Location::RequiresRegister()); 2622 locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1))); 2623 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2624 break; 2625 } 2626 case Primitive::kPrimLong: { 2627 locations->SetInAt(0, Location::RequiresRegister()); 2628 locations->SetInAt(1, Location::RequiresRegister()); 2629 locations->AddTemp(Location::RequiresRegister()); 2630 locations->SetOut(Location::RequiresRegister()); 2631 break; 2632 } 2633 default: 2634 LOG(FATAL) << "Unexpected operation type " << 
op->GetResultType();
  }
}

// Emits the shift. Int shifts mask the count to 0-31 (kMaxIntShiftValue);
// long shifts mask to 0-63 and combine the two 32-bit halves, using a
// PL-predicated instruction to handle counts of 32 or more.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // NOTE(review): this and_ writes back into the input register
        // second_reg -- confirm the register allocator permits clobbering
        // this input here.
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // Shift the high part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}

void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

// HNewInstance is a runtime call: the entrypoint's arguments are pinned to
// runtime calling-convention registers and the result comes back in R0.
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}

// Loads the type index into arg register 0 and the current method into arg
// register 1, then calls the instruction's allocation entrypoint.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}

// Register constraints for HNewArray (a runtime call).
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary*
locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(R0));
  // The array length (input 0) goes directly into the runtime's second
  // argument register.
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}

void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  // Argument 2: the current method; argument 0: the type index. The length
  // was already placed in argument register 1 by the location summary above.
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}

// Parameters passed on the stack live in the caller's frame, so their slot
// indexes are rebased by this method's frame size.
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}

void LocationsBuilderARM::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

// Bitwise not: a single MVN for ints, MVN on each half of the register pair
// for longs.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}

void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  // Booleans are 0 or 1, so XOR with 1 flips the value.
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}

void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case
Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}

// Materializes -1, 0 or 1 for long and floating-point comparisons. The
// switch establishes the condition flags; the shared tail after it converts
// the flags into the output value.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // Unordered (NaN) compares set V; the gt-bias decides which way NaN goes.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}

// Phis are resolved by the register allocator; no code is ever generated
// for them, hence the fatal visitor below.
void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

// Emits a DMB whose option matches the requested barrier kind
// (ISHST for store-store, full ISH otherwise).
void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
  // TODO (ported from quick): revisit Arm barrier kinds
  DmbOptions flavour = DmbOptions::ISH;  // quiet c++ warnings
  switch (kind) {
    case MemBarrierKind::kAnyStore:
    case MemBarrierKind::kLoadAny:
    case MemBarrierKind::kAnyAny: {
      flavour = DmbOptions::ISH;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      flavour = DmbOptions::ISHST;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ dmb(flavour);
2961} 2962 2963void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr, 2964 uint32_t offset, 2965 Register out_lo, 2966 Register out_hi) { 2967 if (offset != 0) { 2968 __ LoadImmediate(out_lo, offset); 2969 __ add(IP, addr, ShifterOperand(out_lo)); 2970 addr = IP; 2971 } 2972 __ ldrexd(out_lo, out_hi, addr); 2973} 2974 2975void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr, 2976 uint32_t offset, 2977 Register value_lo, 2978 Register value_hi, 2979 Register temp1, 2980 Register temp2, 2981 HInstruction* instruction) { 2982 Label fail; 2983 if (offset != 0) { 2984 __ LoadImmediate(temp1, offset); 2985 __ add(IP, addr, ShifterOperand(temp1)); 2986 addr = IP; 2987 } 2988 __ Bind(&fail); 2989 // We need a load followed by store. (The address used in a STREX instruction must 2990 // be the same as the address in the most recently executed LDREX instruction.) 2991 __ ldrexd(temp1, temp2, addr); 2992 codegen_->MaybeRecordImplicitNullCheck(instruction); 2993 __ strexd(temp1, value_lo, value_hi, addr); 2994 __ cmp(temp1, ShifterOperand(0)); 2995 __ b(&fail, NE); 2996} 2997 2998void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) { 2999 DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); 3000 3001 LocationSummary* locations = 3002 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 3003 locations->SetInAt(0, Location::RequiresRegister()); 3004 3005 Primitive::Type field_type = field_info.GetFieldType(); 3006 if (Primitive::IsFloatingPointType(field_type)) { 3007 locations->SetInAt(1, Location::RequiresFpuRegister()); 3008 } else { 3009 locations->SetInAt(1, Location::RequiresRegister()); 3010 } 3011 3012 bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble; 3013 bool generate_volatile = field_info.IsVolatile() 3014 && is_wide 3015 && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); 3016 
// Temporary registers for the write barrier. 3017 // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark. 3018 if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { 3019 locations->AddTemp(Location::RequiresRegister()); 3020 locations->AddTemp(Location::RequiresRegister()); 3021 } else if (generate_volatile) { 3022 // Arm encoding have some additional constraints for ldrexd/strexd: 3023 // - registers need to be consecutive 3024 // - the first register should be even but not R14. 3025 // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever 3026 // enable Arm encoding. 3027 DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet()); 3028 3029 locations->AddTemp(Location::RequiresRegister()); 3030 locations->AddTemp(Location::RequiresRegister()); 3031 if (field_type == Primitive::kPrimDouble) { 3032 // For doubles we need two more registers to copy the value. 3033 locations->AddTemp(Location::RegisterLocation(R2)); 3034 locations->AddTemp(Location::RegisterLocation(R3)); 3035 } 3036 } 3037} 3038 3039void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction, 3040 const FieldInfo& field_info) { 3041 DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet()); 3042 3043 LocationSummary* locations = instruction->GetLocations(); 3044 Register base = locations->InAt(0).AsRegister<Register>(); 3045 Location value = locations->InAt(1); 3046 3047 bool is_volatile = field_info.IsVolatile(); 3048 bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); 3049 Primitive::Type field_type = field_info.GetFieldType(); 3050 uint32_t offset = field_info.GetFieldOffset().Uint32Value(); 3051 3052 if (is_volatile) { 3053 GenerateMemoryBarrier(MemBarrierKind::kAnyStore); 3054 } 3055 3056 switch (field_type) { 3057 case Primitive::kPrimBoolean: 3058 case Primitive::kPrimByte: { 3059 __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), 
base, offset); 3060 break; 3061 } 3062 3063 case Primitive::kPrimShort: 3064 case Primitive::kPrimChar: { 3065 __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset); 3066 break; 3067 } 3068 3069 case Primitive::kPrimInt: 3070 case Primitive::kPrimNot: { 3071 __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset); 3072 break; 3073 } 3074 3075 case Primitive::kPrimLong: { 3076 if (is_volatile && !atomic_ldrd_strd) { 3077 GenerateWideAtomicStore(base, offset, 3078 value.AsRegisterPairLow<Register>(), 3079 value.AsRegisterPairHigh<Register>(), 3080 locations->GetTemp(0).AsRegister<Register>(), 3081 locations->GetTemp(1).AsRegister<Register>(), 3082 instruction); 3083 } else { 3084 __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset); 3085 codegen_->MaybeRecordImplicitNullCheck(instruction); 3086 } 3087 break; 3088 } 3089 3090 case Primitive::kPrimFloat: { 3091 __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset); 3092 break; 3093 } 3094 3095 case Primitive::kPrimDouble: { 3096 DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()); 3097 if (is_volatile && !atomic_ldrd_strd) { 3098 Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>(); 3099 Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>(); 3100 3101 __ vmovrrd(value_reg_lo, value_reg_hi, value_reg); 3102 3103 GenerateWideAtomicStore(base, offset, 3104 value_reg_lo, 3105 value_reg_hi, 3106 locations->GetTemp(2).AsRegister<Register>(), 3107 locations->GetTemp(3).AsRegister<Register>(), 3108 instruction); 3109 } else { 3110 __ StoreDToOffset(value_reg, base, offset); 3111 codegen_->MaybeRecordImplicitNullCheck(instruction); 3112 } 3113 break; 3114 } 3115 3116 case Primitive::kPrimVoid: 3117 LOG(FATAL) << "Unreachable type " << field_type; 3118 UNREACHABLE(); 3119 } 3120 3121 // Longs and doubles are handled in the switch. 
3122 if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) { 3123 codegen_->MaybeRecordImplicitNullCheck(instruction); 3124 } 3125 3126 if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { 3127 Register temp = locations->GetTemp(0).AsRegister<Register>(); 3128 Register card = locations->GetTemp(1).AsRegister<Register>(); 3129 codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>()); 3130 } 3131 3132 if (is_volatile) { 3133 GenerateMemoryBarrier(MemBarrierKind::kAnyAny); 3134 } 3135} 3136 3137void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) { 3138 DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); 3139 LocationSummary* locations = 3140 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 3141 locations->SetInAt(0, Location::RequiresRegister()); 3142 3143 bool volatile_for_double = field_info.IsVolatile() 3144 && (field_info.GetFieldType() == Primitive::kPrimDouble) 3145 && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd(); 3146 bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong); 3147 3148 if (Primitive::IsFloatingPointType(instruction->GetType())) { 3149 locations->SetOut(Location::RequiresFpuRegister()); 3150 } else { 3151 locations->SetOut(Location::RequiresRegister(), 3152 (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap)); 3153 } 3154 if (volatile_for_double) { 3155 // Arm encoding have some additional constraints for ldrexd/strexd: 3156 // - registers need to be consecutive 3157 // - the first register should be even but not R14. 3158 // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever 3159 // enable Arm encoding. 
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}

// Loads the field at `base` + offset into the output location, taking the
// ldrexd path for wide volatile fields and emitting a LoadAny barrier after
// any volatile load.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into core temps, then move into the FP register.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}

// Instance and static field accesses all delegate to the shared
// HandleField{Set,Get} helpers above.
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction,
instruction->GetFieldInfo());
}

void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

// Implicit null check: load through the object so a null base faults, and
// record the PC so the signal handler can map the fault back to this check.
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    // A user instruction will perform the faulting access instead.
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

// Explicit null check: compare against null and branch to the slow path.
void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
}

void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction,
LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

// Loads an array element. Constant indexes fold into the load's immediate
// offset; register indexes are scaled by the element size into IP first.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

// Reference stores that need a type check become a runtime call (see
// pAputObject below); everything else is a plain store.
void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();

  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
  bool
needs_runtime_call = instruction->NeedsTypeCheck();

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(value_type)) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }

    if (needs_write_barrier) {
      // Temporary registers for the write barrier.
      locations->AddTemp(Location::RequiresRegister());
      locations->AddTemp(Location::RequiresRegister());
    }
  }
}

// Stores input 2 into obj[index]. Reference stores mark the GC card; when a
// type check is needed the store is delegated to the pAputObject entrypoint.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value);
        }
      } else {
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        // The runtime performs the component type check and the store.
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  __ LoadFromOffset(kLoadWord, out, obj, offset);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

// Branches to the slow path when index >= length. The CS (unsigned >=)
// condition also catches negative indexes, which wrap to large unsigned
// values in the comparison.
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  Register index = locations->InAt(0).AsRegister<Register>();
  Register length = locations->InAt(1).AsRegister<Register>();

  __ cmp(index, ShifterOperand(length));
  __ b(slow_path->GetEntryLabel(), CS);
}

void
CodeGeneratorARM::MarkGCCard(Register temp, Register card, Register object, Register value) { 3656 Label is_null; 3657 __ CompareAndBranchIfZero(value, &is_null); 3658 __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value()); 3659 __ Lsr(temp, object, gc::accounting::CardTable::kCardShift); 3660 __ strb(card, Address(card, temp)); 3661 __ Bind(&is_null); 3662} 3663 3664void LocationsBuilderARM::VisitTemporary(HTemporary* temp) { 3665 temp->SetLocations(nullptr); 3666} 3667 3668void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) { 3669 // Nothing to do, this is driven by the code generator. 3670 UNUSED(temp); 3671} 3672 3673void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) { 3674 UNUSED(instruction); 3675 LOG(FATAL) << "Unreachable"; 3676} 3677 3678void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) { 3679 codegen_->GetMoveResolver()->EmitNativeCode(instruction); 3680} 3681 3682void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) { 3683 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); 3684} 3685 3686void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) { 3687 HBasicBlock* block = instruction->GetBlock(); 3688 if (block->GetLoopInformation() != nullptr) { 3689 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction); 3690 // The back edge will generate the suspend check. 3691 return; 3692 } 3693 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) { 3694 // The goto will generate the suspend check. 
3695 return; 3696 } 3697 GenerateSuspendCheck(instruction, nullptr); 3698} 3699 3700void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction, 3701 HBasicBlock* successor) { 3702 SuspendCheckSlowPathARM* slow_path = 3703 down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath()); 3704 if (slow_path == nullptr) { 3705 slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor); 3706 instruction->SetSlowPath(slow_path); 3707 codegen_->AddSlowPath(slow_path); 3708 if (successor != nullptr) { 3709 DCHECK(successor->IsLoopHeader()); 3710 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction); 3711 } 3712 } else { 3713 DCHECK_EQ(slow_path->GetSuccessor(), successor); 3714 } 3715 3716 __ LoadFromOffset( 3717 kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value()); 3718 __ cmp(IP, ShifterOperand(0)); 3719 // TODO: Figure out the branch offsets and use cbz/cbnz. 3720 if (successor == nullptr) { 3721 __ b(slow_path->GetEntryLabel(), NE); 3722 __ Bind(slow_path->GetReturnLabel()); 3723 } else { 3724 __ b(codegen_->GetLabelOf(successor), EQ); 3725 __ b(slow_path->GetEntryLabel()); 3726 } 3727} 3728 3729ArmAssembler* ParallelMoveResolverARM::GetAssembler() const { 3730 return codegen_->GetAssembler(); 3731} 3732 3733void ParallelMoveResolverARM::EmitMove(size_t index) { 3734 MoveOperands* move = moves_.Get(index); 3735 Location source = move->GetSource(); 3736 Location destination = move->GetDestination(); 3737 3738 if (source.IsRegister()) { 3739 if (destination.IsRegister()) { 3740 __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>()); 3741 } else { 3742 DCHECK(destination.IsStackSlot()); 3743 __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), 3744 SP, destination.GetStackIndex()); 3745 } 3746 } else if (source.IsStackSlot()) { 3747 if (destination.IsRegister()) { 3748 __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), 3749 SP, 
source.GetStackIndex()); 3750 } else if (destination.IsFpuRegister()) { 3751 __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex()); 3752 } else { 3753 DCHECK(destination.IsStackSlot()); 3754 __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex()); 3755 __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); 3756 } 3757 } else if (source.IsFpuRegister()) { 3758 if (destination.IsFpuRegister()) { 3759 __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>()); 3760 } else { 3761 DCHECK(destination.IsStackSlot()); 3762 __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex()); 3763 } 3764 } else if (source.IsDoubleStackSlot()) { 3765 if (destination.IsDoubleStackSlot()) { 3766 __ LoadDFromOffset(DTMP, SP, source.GetStackIndex()); 3767 __ StoreDToOffset(DTMP, SP, destination.GetStackIndex()); 3768 } else if (destination.IsRegisterPair()) { 3769 DCHECK(ExpectedPairLayout(destination)); 3770 __ LoadFromOffset( 3771 kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex()); 3772 } else { 3773 DCHECK(destination.IsFpuRegisterPair()) << destination; 3774 __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), 3775 SP, 3776 source.GetStackIndex()); 3777 } 3778 } else if (source.IsRegisterPair()) { 3779 if (destination.IsRegisterPair()) { 3780 __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>()); 3781 __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>()); 3782 } else { 3783 DCHECK(destination.IsDoubleStackSlot()) << destination; 3784 DCHECK(ExpectedPairLayout(source)); 3785 __ StoreToOffset( 3786 kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex()); 3787 } 3788 } else if (source.IsFpuRegisterPair()) { 3789 if (destination.IsFpuRegisterPair()) { 3790 __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), 3791 
FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())); 3792 } else { 3793 DCHECK(destination.IsDoubleStackSlot()) << destination; 3794 __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()), 3795 SP, 3796 destination.GetStackIndex()); 3797 } 3798 } else { 3799 DCHECK(source.IsConstant()) << source; 3800 HConstant* constant = source.GetConstant(); 3801 if (constant->IsIntConstant() || constant->IsNullConstant()) { 3802 int32_t value = CodeGenerator::GetInt32ValueOf(constant); 3803 if (destination.IsRegister()) { 3804 __ LoadImmediate(destination.AsRegister<Register>(), value); 3805 } else { 3806 DCHECK(destination.IsStackSlot()); 3807 __ LoadImmediate(IP, value); 3808 __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); 3809 } 3810 } else if (constant->IsLongConstant()) { 3811 int64_t value = constant->AsLongConstant()->GetValue(); 3812 if (destination.IsRegisterPair()) { 3813 __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value)); 3814 __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value)); 3815 } else { 3816 DCHECK(destination.IsDoubleStackSlot()) << destination; 3817 __ LoadImmediate(IP, Low32Bits(value)); 3818 __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); 3819 __ LoadImmediate(IP, High32Bits(value)); 3820 __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize)); 3821 } 3822 } else if (constant->IsDoubleConstant()) { 3823 double value = constant->AsDoubleConstant()->GetValue(); 3824 if (destination.IsFpuRegisterPair()) { 3825 __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value); 3826 } else { 3827 DCHECK(destination.IsDoubleStackSlot()) << destination; 3828 uint64_t int_value = bit_cast<uint64_t, double>(value); 3829 __ LoadImmediate(IP, Low32Bits(int_value)); 3830 __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); 3831 __ LoadImmediate(IP, High32Bits(int_value)); 3832 __ 
StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize)); 3833 } 3834 } else { 3835 DCHECK(constant->IsFloatConstant()) << constant->DebugName(); 3836 float value = constant->AsFloatConstant()->GetValue(); 3837 if (destination.IsFpuRegister()) { 3838 __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value); 3839 } else { 3840 DCHECK(destination.IsStackSlot()); 3841 __ LoadImmediate(IP, bit_cast<int32_t, float>(value)); 3842 __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex()); 3843 } 3844 } 3845 } 3846} 3847 3848void ParallelMoveResolverARM::Exchange(Register reg, int mem) { 3849 __ Mov(IP, reg); 3850 __ LoadFromOffset(kLoadWord, reg, SP, mem); 3851 __ StoreToOffset(kStoreWord, IP, SP, mem); 3852} 3853 3854void ParallelMoveResolverARM::Exchange(int mem1, int mem2) { 3855 ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters()); 3856 int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0; 3857 __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()), 3858 SP, mem1 + stack_offset); 3859 __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset); 3860 __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()), 3861 SP, mem2 + stack_offset); 3862 __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset); 3863} 3864 3865void ParallelMoveResolverARM::EmitSwap(size_t index) { 3866 MoveOperands* move = moves_.Get(index); 3867 Location source = move->GetSource(); 3868 Location destination = move->GetDestination(); 3869 3870 if (source.IsRegister() && destination.IsRegister()) { 3871 DCHECK_NE(source.AsRegister<Register>(), IP); 3872 DCHECK_NE(destination.AsRegister<Register>(), IP); 3873 __ Mov(IP, source.AsRegister<Register>()); 3874 __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>()); 3875 __ Mov(destination.AsRegister<Register>(), IP); 3876 } else if (source.IsRegister() && destination.IsStackSlot()) { 3877 
Exchange(source.AsRegister<Register>(), destination.GetStackIndex()); 3878 } else if (source.IsStackSlot() && destination.IsRegister()) { 3879 Exchange(destination.AsRegister<Register>(), source.GetStackIndex()); 3880 } else if (source.IsStackSlot() && destination.IsStackSlot()) { 3881 Exchange(source.GetStackIndex(), destination.GetStackIndex()); 3882 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { 3883 __ vmovrs(IP, source.AsFpuRegister<SRegister>()); 3884 __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>()); 3885 __ vmovsr(destination.AsFpuRegister<SRegister>(), IP); 3886 } else if (source.IsRegisterPair() && destination.IsRegisterPair()) { 3887 __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>()); 3888 __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>()); 3889 __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>()); 3890 __ vmovrrd(destination.AsRegisterPairLow<Register>(), 3891 destination.AsRegisterPairHigh<Register>(), 3892 DTMP); 3893 } else if (source.IsRegisterPair() || destination.IsRegisterPair()) { 3894 Register low_reg = source.IsRegisterPair() 3895 ? source.AsRegisterPairLow<Register>() 3896 : destination.AsRegisterPairLow<Register>(); 3897 int mem = source.IsRegisterPair() 3898 ? destination.GetStackIndex() 3899 : source.GetStackIndex(); 3900 DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? 
source : destination)); 3901 __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1)); 3902 __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem); 3903 __ StoreDToOffset(DTMP, SP, mem); 3904 } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) { 3905 DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()); 3906 DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()); 3907 __ vmovd(DTMP, first); 3908 __ vmovd(first, second); 3909 __ vmovd(second, DTMP); 3910 } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) { 3911 DRegister reg = source.IsFpuRegisterPair() 3912 ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()) 3913 : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()); 3914 int mem = source.IsFpuRegisterPair() 3915 ? destination.GetStackIndex() 3916 : source.GetStackIndex(); 3917 __ vmovd(DTMP, reg); 3918 __ LoadDFromOffset(reg, SP, mem); 3919 __ StoreDToOffset(DTMP, SP, mem); 3920 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { 3921 SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>() 3922 : destination.AsFpuRegister<SRegister>(); 3923 int mem = source.IsFpuRegister() 3924 ? 
destination.GetStackIndex() 3925 : source.GetStackIndex(); 3926 3927 __ vmovrs(IP, reg); 3928 __ LoadSFromOffset(reg, SP, mem); 3929 __ StoreToOffset(kStoreWord, IP, SP, mem); 3930 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { 3931 Exchange(source.GetStackIndex(), destination.GetStackIndex()); 3932 Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize)); 3933 } else { 3934 LOG(FATAL) << "Unimplemented" << source << " <-> " << destination; 3935 } 3936} 3937 3938void ParallelMoveResolverARM::SpillScratch(int reg) { 3939 __ Push(static_cast<Register>(reg)); 3940} 3941 3942void ParallelMoveResolverARM::RestoreScratch(int reg) { 3943 __ Pop(static_cast<Register>(reg)); 3944} 3945 3946void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) { 3947 LocationSummary::CallKind call_kind = cls->CanCallRuntime() 3948 ? LocationSummary::kCallOnSlowPath 3949 : LocationSummary::kNoCall; 3950 LocationSummary* locations = 3951 new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); 3952 locations->SetOut(Location::RequiresRegister()); 3953} 3954 3955void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { 3956 Register out = cls->GetLocations()->Out().AsRegister<Register>(); 3957 if (cls->IsReferrersClass()) { 3958 DCHECK(!cls->CanCallRuntime()); 3959 DCHECK(!cls->MustGenerateClinitCheck()); 3960 codegen_->LoadCurrentMethod(out); 3961 __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()); 3962 } else { 3963 DCHECK(cls->CanCallRuntime()); 3964 codegen_->LoadCurrentMethod(out); 3965 __ LoadFromOffset( 3966 kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()); 3967 __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())); 3968 3969 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( 3970 cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); 3971 
codegen_->AddSlowPath(slow_path); 3972 __ cmp(out, ShifterOperand(0)); 3973 __ b(slow_path->GetEntryLabel(), EQ); 3974 if (cls->MustGenerateClinitCheck()) { 3975 GenerateClassInitializationCheck(slow_path, out); 3976 } else { 3977 __ Bind(slow_path->GetExitLabel()); 3978 } 3979 } 3980} 3981 3982void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) { 3983 LocationSummary* locations = 3984 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); 3985 locations->SetInAt(0, Location::RequiresRegister()); 3986 if (check->HasUses()) { 3987 locations->SetOut(Location::SameAsFirstInput()); 3988 } 3989} 3990 3991void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) { 3992 // We assume the class is not null. 3993 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( 3994 check->GetLoadClass(), check, check->GetDexPc(), true); 3995 codegen_->AddSlowPath(slow_path); 3996 GenerateClassInitializationCheck(slow_path, 3997 check->GetLocations()->InAt(0).AsRegister<Register>()); 3998} 3999 4000void InstructionCodeGeneratorARM::GenerateClassInitializationCheck( 4001 SlowPathCodeARM* slow_path, Register class_reg) { 4002 __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value()); 4003 __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized)); 4004 __ b(slow_path->GetEntryLabel(), LT); 4005 // Even if the initialized flag is set, we may be in a situation where caches are not synced 4006 // properly. Therefore, we do a memory fence. 
4007 __ dmb(ISH); 4008 __ Bind(slow_path->GetExitLabel()); 4009} 4010 4011void LocationsBuilderARM::VisitLoadString(HLoadString* load) { 4012 LocationSummary* locations = 4013 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath); 4014 locations->SetOut(Location::RequiresRegister()); 4015} 4016 4017void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { 4018 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load); 4019 codegen_->AddSlowPath(slow_path); 4020 4021 Register out = load->GetLocations()->Out().AsRegister<Register>(); 4022 codegen_->LoadCurrentMethod(out); 4023 __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()); 4024 __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); 4025 __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex())); 4026 __ cmp(out, ShifterOperand(0)); 4027 __ b(slow_path->GetEntryLabel(), EQ); 4028 __ Bind(slow_path->GetExitLabel()); 4029} 4030 4031void LocationsBuilderARM::VisitLoadException(HLoadException* load) { 4032 LocationSummary* locations = 4033 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); 4034 locations->SetOut(Location::RequiresRegister()); 4035} 4036 4037void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) { 4038 Register out = load->GetLocations()->Out().AsRegister<Register>(); 4039 int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value(); 4040 __ LoadFromOffset(kLoadWord, out, TR, offset); 4041 __ LoadImmediate(IP, 0); 4042 __ StoreToOffset(kStoreWord, IP, TR, offset); 4043} 4044 4045void LocationsBuilderARM::VisitThrow(HThrow* instruction) { 4046 LocationSummary* locations = 4047 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 4048 InvokeRuntimeCallingConvention calling_convention; 4049 locations->SetInAt(0, 
Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 4050} 4051 4052void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) { 4053 codegen_->InvokeRuntime( 4054 QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr); 4055} 4056 4057void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { 4058 LocationSummary::CallKind call_kind = instruction->IsClassFinal() 4059 ? LocationSummary::kNoCall 4060 : LocationSummary::kCallOnSlowPath; 4061 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); 4062 locations->SetInAt(0, Location::RequiresRegister()); 4063 locations->SetInAt(1, Location::RequiresRegister()); 4064 // The out register is used as a temporary, so it overlaps with the inputs. 4065 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); 4066} 4067 4068void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { 4069 LocationSummary* locations = instruction->GetLocations(); 4070 Register obj = locations->InAt(0).AsRegister<Register>(); 4071 Register cls = locations->InAt(1).AsRegister<Register>(); 4072 Register out = locations->Out().AsRegister<Register>(); 4073 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); 4074 Label done, zero; 4075 SlowPathCodeARM* slow_path = nullptr; 4076 4077 // Return 0 if `obj` is null. 4078 // avoid null check if we know obj is not null. 4079 if (instruction->MustDoNullCheck()) { 4080 __ cmp(obj, ShifterOperand(0)); 4081 __ b(&zero, EQ); 4082 } 4083 // Compare the class of `obj` with `cls`. 4084 __ LoadFromOffset(kLoadWord, out, obj, class_offset); 4085 __ cmp(out, ShifterOperand(cls)); 4086 if (instruction->IsClassFinal()) { 4087 // Classes must be equal for the instanceof to succeed. 4088 __ b(&zero, NE); 4089 __ LoadImmediate(out, 1); 4090 __ b(&done); 4091 } else { 4092 // If the classes are not equal, we go into a slow path. 
4093 DCHECK(locations->OnlyCallsOnSlowPath()); 4094 slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM( 4095 instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc()); 4096 codegen_->AddSlowPath(slow_path); 4097 __ b(slow_path->GetEntryLabel(), NE); 4098 __ LoadImmediate(out, 1); 4099 __ b(&done); 4100 } 4101 4102 if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) { 4103 __ Bind(&zero); 4104 __ LoadImmediate(out, 0); 4105 } 4106 4107 if (slow_path != nullptr) { 4108 __ Bind(slow_path->GetExitLabel()); 4109 } 4110 __ Bind(&done); 4111} 4112 4113void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) { 4114 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( 4115 instruction, LocationSummary::kCallOnSlowPath); 4116 locations->SetInAt(0, Location::RequiresRegister()); 4117 locations->SetInAt(1, Location::RequiresRegister()); 4118 locations->AddTemp(Location::RequiresRegister()); 4119} 4120 4121void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { 4122 LocationSummary* locations = instruction->GetLocations(); 4123 Register obj = locations->InAt(0).AsRegister<Register>(); 4124 Register cls = locations->InAt(1).AsRegister<Register>(); 4125 Register temp = locations->GetTemp(0).AsRegister<Register>(); 4126 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); 4127 4128 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM( 4129 instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc()); 4130 codegen_->AddSlowPath(slow_path); 4131 4132 // avoid null check if we know obj is not null. 4133 if (instruction->MustDoNullCheck()) { 4134 __ cmp(obj, ShifterOperand(0)); 4135 __ b(slow_path->GetExitLabel(), EQ); 4136 } 4137 // Compare the class of `obj` with `cls`. 
4138 __ LoadFromOffset(kLoadWord, temp, obj, class_offset); 4139 __ cmp(temp, ShifterOperand(cls)); 4140 __ b(slow_path->GetEntryLabel(), NE); 4141 __ Bind(slow_path->GetExitLabel()); 4142} 4143 4144void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) { 4145 LocationSummary* locations = 4146 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 4147 InvokeRuntimeCallingConvention calling_convention; 4148 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 4149} 4150 4151void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) { 4152 codegen_->InvokeRuntime(instruction->IsEnter() 4153 ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject), 4154 instruction, 4155 instruction->GetDexPc(), 4156 nullptr); 4157} 4158 4159void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); } 4160void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); } 4161void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); } 4162 4163void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) { 4164 LocationSummary* locations = 4165 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 4166 DCHECK(instruction->GetResultType() == Primitive::kPrimInt 4167 || instruction->GetResultType() == Primitive::kPrimLong); 4168 locations->SetInAt(0, Location::RequiresRegister()); 4169 locations->SetInAt(1, Location::RequiresRegister()); 4170 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 4171} 4172 4173void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) { 4174 HandleBitwiseOperation(instruction); 4175} 4176 4177void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) { 4178 HandleBitwiseOperation(instruction); 4179} 4180 4181void InstructionCodeGeneratorARM::VisitXor(HXor* 
instruction) { 4182 HandleBitwiseOperation(instruction); 4183} 4184 4185void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) { 4186 LocationSummary* locations = instruction->GetLocations(); 4187 4188 if (instruction->GetResultType() == Primitive::kPrimInt) { 4189 Register first = locations->InAt(0).AsRegister<Register>(); 4190 Register second = locations->InAt(1).AsRegister<Register>(); 4191 Register out = locations->Out().AsRegister<Register>(); 4192 if (instruction->IsAnd()) { 4193 __ and_(out, first, ShifterOperand(second)); 4194 } else if (instruction->IsOr()) { 4195 __ orr(out, first, ShifterOperand(second)); 4196 } else { 4197 DCHECK(instruction->IsXor()); 4198 __ eor(out, first, ShifterOperand(second)); 4199 } 4200 } else { 4201 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); 4202 Location first = locations->InAt(0); 4203 Location second = locations->InAt(1); 4204 Location out = locations->Out(); 4205 if (instruction->IsAnd()) { 4206 __ and_(out.AsRegisterPairLow<Register>(), 4207 first.AsRegisterPairLow<Register>(), 4208 ShifterOperand(second.AsRegisterPairLow<Register>())); 4209 __ and_(out.AsRegisterPairHigh<Register>(), 4210 first.AsRegisterPairHigh<Register>(), 4211 ShifterOperand(second.AsRegisterPairHigh<Register>())); 4212 } else if (instruction->IsOr()) { 4213 __ orr(out.AsRegisterPairLow<Register>(), 4214 first.AsRegisterPairLow<Register>(), 4215 ShifterOperand(second.AsRegisterPairLow<Register>())); 4216 __ orr(out.AsRegisterPairHigh<Register>(), 4217 first.AsRegisterPairHigh<Register>(), 4218 ShifterOperand(second.AsRegisterPairHigh<Register>())); 4219 } else { 4220 DCHECK(instruction->IsXor()); 4221 __ eor(out.AsRegisterPairLow<Register>(), 4222 first.AsRegisterPairLow<Register>(), 4223 ShifterOperand(second.AsRegisterPairLow<Register>())); 4224 __ eor(out.AsRegisterPairHigh<Register>(), 4225 first.AsRegisterPairHigh<Register>(), 4226 ShifterOperand(second.AsRegisterPairHigh<Register>())); 4227 } 
4228 } 4229} 4230 4231void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) { 4232 DCHECK_EQ(temp, kArtMethodRegister); 4233 4234 // TODO: Implement all kinds of calls: 4235 // 1) boot -> boot 4236 // 2) app -> boot 4237 // 3) app -> app 4238 // 4239 // Currently we implement the app -> app logic, which looks up in the resolve cache. 4240 4241 if (invoke->IsStringInit()) { 4242 // temp = thread->string_init_entrypoint 4243 __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset()); 4244 // LR = temp[offset_of_quick_compiled_code] 4245 __ LoadFromOffset(kLoadWord, LR, temp, 4246 mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( 4247 kArmWordSize).Int32Value()); 4248 // LR() 4249 __ blx(LR); 4250 } else { 4251 // temp = method; 4252 LoadCurrentMethod(temp); 4253 if (!invoke->IsRecursive()) { 4254 // temp = temp->dex_cache_resolved_methods_; 4255 __ LoadFromOffset( 4256 kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()); 4257 // temp = temp[index_in_cache] 4258 __ LoadFromOffset( 4259 kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())); 4260 // LR = temp[offset_of_quick_compiled_code] 4261 __ LoadFromOffset(kLoadWord, LR, temp, 4262 mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( 4263 kArmWordSize).Int32Value()); 4264 // LR() 4265 __ blx(LR); 4266 } else { 4267 __ bl(GetFrameEntryLabel()); 4268 } 4269 } 4270 4271 DCHECK(!IsLeafMethod()); 4272} 4273 4274void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) { 4275 // Nothing to do, this should be removed during prepare for register allocator. 4276 UNUSED(instruction); 4277 LOG(FATAL) << "Unreachable"; 4278} 4279 4280void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) { 4281 // Nothing to do, this should be removed during prepare for register allocator. 
4282 UNUSED(instruction); 4283 LOG(FATAL) << "Unreachable"; 4284} 4285 4286} // namespace arm 4287} // namespace art 4288