// code_generator_arm.cc revision fbeb4aede0ddc5b1e6a5a3a40cc6266fe8518c98
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {

namespace arm {

// Returns whether `location` is a register pair whose low register is
// even-numbered and whose high register is the one immediately after it.
static bool ExpectedPairLayout(Location location) {
  // We expect this for both core and fpu register pairs.
  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
}

// Stack offset at which the current ArtMethod* is spilled (see
// GenerateFrameEntry, which stores it at SP + 0).
static constexpr int kCurrentMethodStackOffset = 0;
// Register carrying the ArtMethod* argument on entry.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Within the slow path classes below, `codegen` is the EmitNativeCode parameter.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()

// Slow path calling the pThrowNullPointer runtime entrypoint.
class NullCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};

// Slow path calling the pThrowDivZero runtime entrypoint.
class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};

// Slow path calling pTestSuspend, saving/restoring live registers around the
// call, then branching either back to `return_label_` or to `successor_`.
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ b(GetReturnLabel());
    } else {
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};

// Slow path calling pThrowArrayBounds with the index and length as arguments.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
                         Location index_location,
                         Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        length_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};

// Slow path resolving (pInitializeType) or initializing
// (pInitializeStaticStorage) a class, then moving the runtime's result into
// the instruction's out location.
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    // The runtime takes the type index as first argument.
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};

// Slow path resolving a string via pResolveString, then moving the result
// (returned in R0) into the instruction's out location.
class LoadStringSlowPathARM : public SlowPathCodeARM {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    // The runtime takes the string index as first argument.
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};

// Slow path shared by HInstanceOf (calls pInstanceofNonTrivial and moves the
// result to the out location) and HCheckCast (calls pCheckCast).
class TypeCheckSlowPathARM : public SlowPathCodeARM {
 public:
  TypeCheckSlowPathARM(HInstruction* instruction,
                       Location class_to_check,
                       Location object_class,
                       uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        object_class_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
    }

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};

// Slow path calling the pDeoptimize runtime entrypoint.
class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

private: 314 HInstruction* const instruction_; 315 DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM); 316}; 317 318#undef __ 319 320#undef __ 321#define __ down_cast<ArmAssembler*>(GetAssembler())-> 322 323inline Condition ARMCondition(IfCondition cond) { 324 switch (cond) { 325 case kCondEQ: return EQ; 326 case kCondNE: return NE; 327 case kCondLT: return LT; 328 case kCondLE: return LE; 329 case kCondGT: return GT; 330 case kCondGE: return GE; 331 default: 332 LOG(FATAL) << "Unknown if condition"; 333 } 334 return EQ; // Unreachable. 335} 336 337inline Condition ARMOppositeCondition(IfCondition cond) { 338 switch (cond) { 339 case kCondEQ: return NE; 340 case kCondNE: return EQ; 341 case kCondLT: return GE; 342 case kCondLE: return GT; 343 case kCondGT: return LE; 344 case kCondGE: return LT; 345 default: 346 LOG(FATAL) << "Unknown if condition"; 347 } 348 return EQ; // Unreachable. 349} 350 351void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const { 352 stream << Register(reg); 353} 354 355void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const { 356 stream << SRegister(reg); 357} 358 359size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) { 360 __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index); 361 return kArmWordSize; 362} 363 364size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) { 365 __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index); 366 return kArmWordSize; 367} 368 369size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) { 370 __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index); 371 return kArmWordSize; 372} 373 374size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) { 375 __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index); 376 return kArmWordSize; 377} 378 
// Constructs the ARM code generator, registering the callee-save masks with
// the base CodeGenerator.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(false /* can_relocate_branches */),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}

// Picks a free register (or even-aligned pair) for `type` and marks it as
// blocked, keeping the pair bookkeeping consistent.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      // Block both halves so single-register allocation cannot reuse them.
      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}

// Marks registers the allocator must never hand out. Under baseline, all
// callee-saves are blocked except kCoreSavedRegisterForBaseline.
void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
  // Don't allocate the dalvik style register pair passing.
  blocked_register_pairs_[R1_R2] = true;

  // Stack register, LR and PC are always reserved.
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[LR] = true;
  blocked_core_registers_[PC] = true;

  // Reserve thread register.
  blocked_core_registers_[TR] = true;

  // Reserve temp register.
  blocked_core_registers_[IP] = true;

  if (is_baseline) {
    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
    }

    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
    }
  }

  UpdateBlockedPairRegisters();
}

// Re-derives blocked_register_pairs_: a pair is blocked if either half is.
void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
    ArmManagedRegister current =
        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
    if (blocked_core_registers_[current.AsRegisterPairLow()]
        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
      blocked_register_pairs_[i] = true;
    }
  }
}

InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

// Computes core_spill_mask_ and fpu_spill_mask_ from the allocated
// callee-save registers.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest spilled S register so the
    // mask describes one contiguous vpush/vpop range.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}

// DWARF register number for a core register.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

// DWARF register number for a VFP S register.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}

// Emits the method prologue: optional stack-overflow probe, core and FP
// callee-save pushes (with matching CFI), frame allocation, and spilling the
// method register argument at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Probe the address GetStackOverflowReservedBytes(kArm) below SP; the
    // load faults if the stack would overflow.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  // Spill the method register argument at SP + 0 (kCurrentMethodStackOffset).
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}

// Emits the method epilogue, mirroring GenerateFrameEntry; popping PC from
// core_spill_mask_ performs the return.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    // NOTE(review): GenerateFrameEntry uses kArmWordSize for the matching
    // adjustment; confirm kArmPointerSize has the same value.
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

// Binds the label of `block` to the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

// Returns the stack slot (single or double, by type width) backing a local.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

// Assigns the next argument location (register, register pair, FPU register,
// FPU pair, or stack slot) under the managed calling convention, advancing
// the gp/fpu/stack cursors.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // On an even float cursor, advance it past any S registers already
      // consumed by doubles.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at an even S register at or past the float cursor.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
            calling_convention.GetFpuRegisterAt(index),
            calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}

// Return-value location: R0 / R0_R1 for core types, S0 / S0_S1 for floating
// point, nothing for void.
Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      return Location::RegisterLocation(R0);
    }

    case Primitive::kPrimFloat: {
      return Location::FpuRegisterLocation(S0);
    }

    case Primitive::kPrimLong: {
      return Location::RegisterPairLocation(R0, R1);
    }

    case Primitive::kPrimDouble: {
      return Location::FpuRegisterPairLocation(S0, S1);
    }

    case Primitive::kPrimVoid:
      return Location();
  }

  UNREACHABLE();
}

// The method pointer argument is always passed in kMethodRegisterArgument (R0).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

// Emits a 32-bit move between any combination of core register, S register
// and stack slot (stack-to-stack goes through the IP scratch register).
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}

// Emits a 64-bit move between register pairs, FPU register pairs and double
// stack slots. Combinations not emitted by the backend are UNIMPLEMENTED.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
785 if (source.AsRegisterPairLow<Register>() == R1) { 786 DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2); 787 __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex()); 788 __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize)); 789 } else { 790 __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(), 791 SP, destination.GetStackIndex()); 792 } 793 } else if (source.IsFpuRegisterPair()) { 794 __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()), 795 SP, 796 destination.GetStackIndex()); 797 } else { 798 DCHECK(source.IsDoubleStackSlot()); 799 EmitParallelMoves( 800 Location::StackSlot(source.GetStackIndex()), 801 Location::StackSlot(destination.GetStackIndex()), 802 Primitive::kPrimInt, 803 Location::StackSlot(source.GetHighStackIndex(kArmWordSize)), 804 Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)), 805 Primitive::kPrimInt); 806 } 807 } 808} 809 810void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) { 811 LocationSummary* locations = instruction->GetLocations(); 812 if (instruction->IsCurrentMethod()) { 813 Move32(location, Location::StackSlot(kCurrentMethodStackOffset)); 814 } else if (locations != nullptr && locations->Out().Equals(location)) { 815 return; 816 } else if (locations != nullptr && locations->Out().IsConstant()) { 817 HConstant* const_to_move = locations->Out().GetConstant(); 818 if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) { 819 int32_t value = GetInt32ValueOf(const_to_move); 820 if (location.IsRegister()) { 821 __ LoadImmediate(location.AsRegister<Register>(), value); 822 } else { 823 DCHECK(location.IsStackSlot()); 824 __ LoadImmediate(IP, value); 825 __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex()); 826 } 827 } else { 828 DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName(); 829 int64_t value = const_to_move->AsLongConstant()->GetValue(); 830 if 
(location.IsRegisterPair()) { 831 __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value)); 832 __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value)); 833 } else { 834 DCHECK(location.IsDoubleStackSlot()); 835 __ LoadImmediate(IP, Low32Bits(value)); 836 __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex()); 837 __ LoadImmediate(IP, High32Bits(value)); 838 __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize)); 839 } 840 } 841 } else if (instruction->IsLoadLocal()) { 842 uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal()); 843 switch (instruction->GetType()) { 844 case Primitive::kPrimBoolean: 845 case Primitive::kPrimByte: 846 case Primitive::kPrimChar: 847 case Primitive::kPrimShort: 848 case Primitive::kPrimInt: 849 case Primitive::kPrimNot: 850 case Primitive::kPrimFloat: 851 Move32(location, Location::StackSlot(stack_slot)); 852 break; 853 854 case Primitive::kPrimLong: 855 case Primitive::kPrimDouble: 856 Move64(location, Location::DoubleStackSlot(stack_slot)); 857 break; 858 859 default: 860 LOG(FATAL) << "Unexpected type " << instruction->GetType(); 861 } 862 } else if (instruction->IsTemporary()) { 863 Location temp_location = GetTemporaryLocation(instruction->AsTemporary()); 864 if (temp_location.IsStackSlot()) { 865 Move32(location, temp_location); 866 } else { 867 DCHECK(temp_location.IsDoubleStackSlot()); 868 Move64(location, temp_location); 869 } 870 } else { 871 DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary()); 872 switch (instruction->GetType()) { 873 case Primitive::kPrimBoolean: 874 case Primitive::kPrimByte: 875 case Primitive::kPrimChar: 876 case Primitive::kPrimShort: 877 case Primitive::kPrimNot: 878 case Primitive::kPrimInt: 879 case Primitive::kPrimFloat: 880 Move32(location, locations->Out()); 881 break; 882 883 case Primitive::kPrimLong: 884 case Primitive::kPrimDouble: 885 Move64(location, locations->Out()); 
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}

// Calls into the runtime: loads the entry point at `entry_point_offset` from
// the thread register (TR) into LR, branches to it, and records the PC
// information for `dex_pc` (against `slow_path` if non-null).
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only these instruction kinds (or non-leaf methods) are expected to
  // invoke the runtime.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}

// An unconditional branch needs no register locations.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  // On a loop back edge carrying a suspend check, emit the suspend check
  // (which handles the branch to the successor) instead of a plain branch.
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // A suspend check preceding this goto in the entry block is emitted here.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // No branch is needed when the successor is next in emission order.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}

// Emits the compare-and-branch sequence for `instruction`, whose condition is
// its input 0. `true_target`/`false_target` may be null when the respective
// successor falls through; `always_true_target` is the label to branch to
// when the condition is a constant known true (null if no branch is needed).
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          // The constant cannot be encoded as an immediate operand;
          // materialize it in the scratch register IP first.
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}

void LocationsBuilderARM::VisitIf(HIf* if_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
  HInstruction* cond = if_instr->InputAt(0);
  // An input register is only needed when the condition is materialized.
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  // Elide branches to successors that immediately follow in emission order.
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  // Branch to the deoptimization slow path when the condition holds.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathARM(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderARM::VisitCondition(HCondition* cond) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
  // An output register is only needed when the condition is materialized.
  if (cond->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM::VisitCondition(HCondition*
cond) {
  // Nothing to emit when the condition is consumed directly by a branch.
  if (!cond->NeedsMaterialization()) return;
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      // The constant cannot be encoded as an immediate operand;
      // materialize it in the scratch register IP first.
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // Materialize the result as 1 (condition holds) or 0 using an IT/ELSE
  // predicated move pair.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}

// The comparison visitors below all defer to the generic HCondition handling.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // Locals are only expected in the entry block; no code is emitted.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  // The stored value (input 1) lives in the local's stack slot; 64-bit types
  // use a double stack slot.
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}

void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  // The return value must already be in the calling-convention return
  // location for its type.
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}

void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  // Let a matching intrinsic set up its own locations.
  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

// Loads the current (compiled) method from its stack slot into `reg`.
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}

// Emits intrinsic code for `invoke` if its locations were intrinsified.
// Returns whether intrinsic code was generated.
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
  if (invoke->GetLocations()->Intrinsified()) {
    IntrinsicCodeGeneratorARM intrinsic(codegen);
    intrinsic.Dispatch(invoke);
    return true;
  }
  return false;
}

void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();

  codegen_->GenerateStaticOrDirectCall(invoke, temp);
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}

void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

// Emits a virtual dispatch: loads the receiver's class, fetches the method
// from the embedded vtable, and calls through its quick entry point.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}

// Emits an interface dispatch through the embedded IMT, passing the dex
// method index as a hidden argument in R12.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // The output overlaps the input: the high half of the input is read
      // after the low half of the output has been written.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set. We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);

  // The float-to-long, double-to-long and long-to-float type conversions
  // rely on a call to the runtime.
  LocationSummary::CallKind call_kind =
      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
        && result_type == Primitive::kPrimLong)
       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  // The Java language does not allow treating boolean as an integral type but
  // our bit representation makes it safe.

  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-int' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-int' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-long' instruction.
          // Performed by a runtime call; input and output use the runtime
          // calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterLocation(
              calling_convention.GetFpuRegisterAt(0)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-long' instruction.
          // Performed by a runtime call; input and output use the runtime
          // calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterPairLocation(
              calling_convention.GetFpuRegisterAt(0),
              calling_convention.GetFpuRegisterAt(1)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-float' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          // Performed by a runtime call; input and output use the runtime
          // calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::RegisterPairLocation(
              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-double' instruction.
          // Two FP temporaries are needed for the in-line conversion below.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}

void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend the low 8 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend the low 16 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Truncation: keep only the low 32 bits of the input.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend the low 16 bits.
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Computed as out = double(high) * 2^32 + unsigned-double(low).
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
          DRegister constant_d = FromLowSToD(constant_s);

          // temp_d = int-to-double(high)
          __ vmovsr(temp_s, high);
          __ vcvtdi(temp_d, temp_s);
          // constant_d = k2Pow32EncodingForDouble
          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
          // out_d = unsigned-to-double(low)
          __ vmovsr(out_s, low);
          __ vcvtdu(out_d, out_s);
          // out_d += temp_d * constant_d
          __ vmlad(out_d, temp_d, constant_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
1879 __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), 1880 in.AsFpuRegister<SRegister>()); 1881 break; 1882 1883 default: 1884 LOG(FATAL) << "Unexpected type conversion from " << input_type 1885 << " to " << result_type; 1886 }; 1887 break; 1888 1889 default: 1890 LOG(FATAL) << "Unexpected type conversion from " << input_type 1891 << " to " << result_type; 1892 } 1893} 1894 1895void LocationsBuilderARM::VisitAdd(HAdd* add) { 1896 LocationSummary* locations = 1897 new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall); 1898 switch (add->GetResultType()) { 1899 case Primitive::kPrimInt: { 1900 locations->SetInAt(0, Location::RequiresRegister()); 1901 locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1))); 1902 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 1903 break; 1904 } 1905 1906 case Primitive::kPrimLong: { 1907 locations->SetInAt(0, Location::RequiresRegister()); 1908 locations->SetInAt(1, Location::RequiresRegister()); 1909 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 1910 break; 1911 } 1912 1913 case Primitive::kPrimFloat: 1914 case Primitive::kPrimDouble: { 1915 locations->SetInAt(0, Location::RequiresFpuRegister()); 1916 locations->SetInAt(1, Location::RequiresFpuRegister()); 1917 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 1918 break; 1919 } 1920 1921 default: 1922 LOG(FATAL) << "Unexpected add type " << add->GetResultType(); 1923 } 1924} 1925 1926void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) { 1927 LocationSummary* locations = add->GetLocations(); 1928 Location out = locations->Out(); 1929 Location first = locations->InAt(0); 1930 Location second = locations->InAt(1); 1931 switch (add->GetResultType()) { 1932 case Primitive::kPrimInt: 1933 if (second.IsRegister()) { 1934 __ add(out.AsRegister<Register>(), 1935 first.AsRegister<Register>(), 1936 ShifterOperand(second.AsRegister<Register>())); 1937 } else 
      {
        // Constant second operand; AddConstant materializes it as needed.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // 64-bit add: adds sets the carry consumed by adc.
      __ adds(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ adc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat:
      __ vadds(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}

// Register allocation for HSub: mirrors VisitAdd above.
void LocationsBuilderARM::VisitSub(HSub* sub) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}

void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
  LocationSummary* locations = sub->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ sub(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // first - imm is emitted as first + (-imm).
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       -second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;
    }

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // 64-bit subtract: subs sets the borrow consumed by sbc.
      __ subs(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ sbc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vsubs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }


    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}

void LocationsBuilderARM::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect against aliasing caused by the existence of
      // the R1_R2 pair. The algorithm below is wrong if out.hi is either
      // in1.lo or in2.lo (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2), because
      // out.hi is written before in1.lo/in2.lo are last read.
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

// Emits int div/rem by a constant divisor of +1 or -1 without any division.
void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  DCHECK(imm == 1 || imm == -1);

  if (instruction->IsRem()) {
    // x % +/-1 is always 0.
    __ LoadImmediate(out, 0);
  } else {
    if (imm == 1) {
      __ Mov(out, dividend);
    } else {
      // x / -1 == -x.
      __ rsb(out, dividend, ShifterOperand(0));
    }
  }
}

// Emits int div/rem by a constant power-of-two divisor using shifts.
// Adds (divisor - 1) to negative dividends first so the arithmetic shift
// rounds toward zero as Java division requires.
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  // temp = (dividend < 0) ? abs_imm - 1 : 0, i.e. the rounding correction.
  if (ctz_imm == 1) {
    // For |imm| == 2 the sign bit itself is the correction.
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    // rem = corrected low bits minus the correction.
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}

// Emits int div/rem by an arbitrary non-zero constant using the
// multiply-by-magic-number technique (Granlund & Montgomery / Hacker's
// Delight); no division instruction is used.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 = high 32 bits of dividend * magic.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Sign corrections required when magic and divisor have opposite signs.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // quotient = temp1 + (temp1 < 0 ? 1 : 0).
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // rem = dividend - quotient * imm.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}

// Dispatches int div/rem by constant to the specialized emitters above.
void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  if (imm == 0) {
    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
  } else if (imm == 1 || imm == -1) {
    DivRemOneOrMinusOne(instruction);
  } else if (IsPowerOfTwo(std::abs(imm))) {
    // NOTE(review): std::abs(kPrimIntMin) overflows (UB); presumably kMinInt
    // never reaches this path or falls through to the any-constant case —
    // TODO confirm.
    DivRemByPowerOfTwo(instruction);
  } else {
    DCHECK(imm <= -2 || imm >= 2);
    GenerateDivRemWithAnyConstant(instruction);
  }
}

// Register allocation for HDiv. Long division and int division without
// hardware sdiv go through runtime calls; constant int divisors use the
// register-free sequences above and may need one or two temps.
void LocationsBuilderARM::VisitDiv(HDiv* div) {
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if (div->GetResultType() == Primitive::kPrimLong) {
    // pLdiv runtime call.
    call_kind = LocationSummary::kCall;
  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
    // sdiv will be replaced by other instruction sequence.
  } else if (div->GetResultType() == Primitive::kPrimInt &&
             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
    // pIdivmod runtime call.
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (div->InputAt(1)->IsConstant()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
        if (abs_imm <= 1) {
          // No temp register required.
        } else {
          locations->AddTemp(Location::RequiresRegister());
          if (!IsPowerOfTwo(abs_imm)) {
            locations->AddTemp(Location::RequiresRegister());
          }
        }
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RequiresRegister());
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      } else {
        InvokeRuntimeCallingConvention calling_convention;
        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
        // we only need the former.
2299 locations->SetOut(Location::RegisterLocation(R0)); 2300 } 2301 break; 2302 } 2303 case Primitive::kPrimLong: { 2304 InvokeRuntimeCallingConvention calling_convention; 2305 locations->SetInAt(0, Location::RegisterPairLocation( 2306 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); 2307 locations->SetInAt(1, Location::RegisterPairLocation( 2308 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); 2309 locations->SetOut(Location::RegisterPairLocation(R0, R1)); 2310 break; 2311 } 2312 case Primitive::kPrimFloat: 2313 case Primitive::kPrimDouble: { 2314 locations->SetInAt(0, Location::RequiresFpuRegister()); 2315 locations->SetInAt(1, Location::RequiresFpuRegister()); 2316 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 2317 break; 2318 } 2319 2320 default: 2321 LOG(FATAL) << "Unexpected div type " << div->GetResultType(); 2322 } 2323} 2324 2325void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) { 2326 LocationSummary* locations = div->GetLocations(); 2327 Location out = locations->Out(); 2328 Location first = locations->InAt(0); 2329 Location second = locations->InAt(1); 2330 2331 switch (div->GetResultType()) { 2332 case Primitive::kPrimInt: { 2333 if (second.IsConstant()) { 2334 GenerateDivRemConstantIntegral(div); 2335 } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { 2336 __ sdiv(out.AsRegister<Register>(), 2337 first.AsRegister<Register>(), 2338 second.AsRegister<Register>()); 2339 } else { 2340 InvokeRuntimeCallingConvention calling_convention; 2341 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>()); 2342 DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>()); 2343 DCHECK_EQ(R0, out.AsRegister<Register>()); 2344 2345 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr); 2346 } 2347 break; 2348 } 2349 2350 case Primitive::kPrimLong: { 2351 InvokeRuntimeCallingConvention 
calling_convention; 2352 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>()); 2353 DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>()); 2354 DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>()); 2355 DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>()); 2356 DCHECK_EQ(R0, out.AsRegisterPairLow<Register>()); 2357 DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>()); 2358 2359 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr); 2360 break; 2361 } 2362 2363 case Primitive::kPrimFloat: { 2364 __ vdivs(out.AsFpuRegister<SRegister>(), 2365 first.AsFpuRegister<SRegister>(), 2366 second.AsFpuRegister<SRegister>()); 2367 break; 2368 } 2369 2370 case Primitive::kPrimDouble: { 2371 __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), 2372 FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()), 2373 FromLowSToD(second.AsFpuRegisterPairLow<SRegister>())); 2374 break; 2375 } 2376 2377 default: 2378 LOG(FATAL) << "Unexpected div type " << div->GetResultType(); 2379 } 2380} 2381 2382void LocationsBuilderARM::VisitRem(HRem* rem) { 2383 Primitive::Type type = rem->GetResultType(); 2384 2385 // Most remainders are implemented in the runtime. 2386 LocationSummary::CallKind call_kind = LocationSummary::kCall; 2387 if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) { 2388 // sdiv will be replaced by other instruction sequence. 2389 call_kind = LocationSummary::kNoCall; 2390 } else if ((rem->GetResultType() == Primitive::kPrimInt) 2391 && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { 2392 // Have hardware divide instruction for int, do it with three instructions. 
2393 call_kind = LocationSummary::kNoCall; 2394 } 2395 2396 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); 2397 2398 switch (type) { 2399 case Primitive::kPrimInt: { 2400 if (rem->InputAt(1)->IsConstant()) { 2401 locations->SetInAt(0, Location::RequiresRegister()); 2402 locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1))); 2403 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2404 int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue()); 2405 if (abs_imm <= 1) { 2406 // No temp register required. 2407 } else { 2408 locations->AddTemp(Location::RequiresRegister()); 2409 if (!IsPowerOfTwo(abs_imm)) { 2410 locations->AddTemp(Location::RequiresRegister()); 2411 } 2412 } 2413 } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { 2414 locations->SetInAt(0, Location::RequiresRegister()); 2415 locations->SetInAt(1, Location::RequiresRegister()); 2416 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2417 locations->AddTemp(Location::RequiresRegister()); 2418 } else { 2419 InvokeRuntimeCallingConvention calling_convention; 2420 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2421 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 2422 // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but 2423 // we only need the latter. 
2424 locations->SetOut(Location::RegisterLocation(R1)); 2425 } 2426 break; 2427 } 2428 case Primitive::kPrimLong: { 2429 InvokeRuntimeCallingConvention calling_convention; 2430 locations->SetInAt(0, Location::RegisterPairLocation( 2431 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); 2432 locations->SetInAt(1, Location::RegisterPairLocation( 2433 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); 2434 // The runtime helper puts the output in R2,R3. 2435 locations->SetOut(Location::RegisterPairLocation(R2, R3)); 2436 break; 2437 } 2438 case Primitive::kPrimFloat: { 2439 InvokeRuntimeCallingConvention calling_convention; 2440 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); 2441 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); 2442 locations->SetOut(Location::FpuRegisterLocation(S0)); 2443 break; 2444 } 2445 2446 case Primitive::kPrimDouble: { 2447 InvokeRuntimeCallingConvention calling_convention; 2448 locations->SetInAt(0, Location::FpuRegisterPairLocation( 2449 calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1))); 2450 locations->SetInAt(1, Location::FpuRegisterPairLocation( 2451 calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3))); 2452 locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1)); 2453 break; 2454 } 2455 2456 default: 2457 LOG(FATAL) << "Unexpected rem type " << type; 2458 } 2459} 2460 2461void InstructionCodeGeneratorARM::VisitRem(HRem* rem) { 2462 LocationSummary* locations = rem->GetLocations(); 2463 Location out = locations->Out(); 2464 Location first = locations->InAt(0); 2465 Location second = locations->InAt(1); 2466 2467 Primitive::Type type = rem->GetResultType(); 2468 switch (type) { 2469 case Primitive::kPrimInt: { 2470 if (second.IsConstant()) { 2471 GenerateDivRemConstantIntegral(rem); 2472 } else if 
(codegen_->GetInstructionSetFeatures().HasDivideInstruction()) { 2473 Register reg1 = first.AsRegister<Register>(); 2474 Register reg2 = second.AsRegister<Register>(); 2475 Register temp = locations->GetTemp(0).AsRegister<Register>(); 2476 2477 // temp = reg1 / reg2 (integer division) 2478 // temp = temp * reg2 2479 // dest = reg1 - temp 2480 __ sdiv(temp, reg1, reg2); 2481 __ mul(temp, temp, reg2); 2482 __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp)); 2483 } else { 2484 InvokeRuntimeCallingConvention calling_convention; 2485 DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>()); 2486 DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>()); 2487 DCHECK_EQ(R1, out.AsRegister<Register>()); 2488 2489 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr); 2490 } 2491 break; 2492 } 2493 2494 case Primitive::kPrimLong: { 2495 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr); 2496 break; 2497 } 2498 2499 case Primitive::kPrimFloat: { 2500 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr); 2501 break; 2502 } 2503 2504 case Primitive::kPrimDouble: { 2505 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr); 2506 break; 2507 } 2508 2509 default: 2510 LOG(FATAL) << "Unexpected rem type " << type; 2511 } 2512} 2513 2514void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) { 2515 LocationSummary* locations = 2516 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2517 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); 2518 if (instruction->HasUses()) { 2519 locations->SetOut(Location::SameAsFirstInput()); 2520 } 2521} 2522 2523void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) { 2524 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction); 2525 
codegen_->AddSlowPath(slow_path); 2526 2527 LocationSummary* locations = instruction->GetLocations(); 2528 Location value = locations->InAt(0); 2529 2530 switch (instruction->GetType()) { 2531 case Primitive::kPrimInt: { 2532 if (value.IsRegister()) { 2533 __ cmp(value.AsRegister<Register>(), ShifterOperand(0)); 2534 __ b(slow_path->GetEntryLabel(), EQ); 2535 } else { 2536 DCHECK(value.IsConstant()) << value; 2537 if (value.GetConstant()->AsIntConstant()->GetValue() == 0) { 2538 __ b(slow_path->GetEntryLabel()); 2539 } 2540 } 2541 break; 2542 } 2543 case Primitive::kPrimLong: { 2544 if (value.IsRegisterPair()) { 2545 __ orrs(IP, 2546 value.AsRegisterPairLow<Register>(), 2547 ShifterOperand(value.AsRegisterPairHigh<Register>())); 2548 __ b(slow_path->GetEntryLabel(), EQ); 2549 } else { 2550 DCHECK(value.IsConstant()) << value; 2551 if (value.GetConstant()->AsLongConstant()->GetValue() == 0) { 2552 __ b(slow_path->GetEntryLabel()); 2553 } 2554 } 2555 break; 2556 default: 2557 LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType(); 2558 } 2559 } 2560} 2561 2562void LocationsBuilderARM::HandleShift(HBinaryOperation* op) { 2563 DCHECK(op->IsShl() || op->IsShr() || op->IsUShr()); 2564 2565 LocationSummary* locations = 2566 new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall); 2567 2568 switch (op->GetResultType()) { 2569 case Primitive::kPrimInt: { 2570 locations->SetInAt(0, Location::RequiresRegister()); 2571 locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1))); 2572 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2573 break; 2574 } 2575 case Primitive::kPrimLong: { 2576 locations->SetInAt(0, Location::RequiresRegister()); 2577 locations->SetInAt(1, Location::RequiresRegister()); 2578 locations->AddTemp(Location::RequiresRegister()); 2579 locations->SetOut(Location::RequiresRegister()); 2580 break; 2581 } 2582 default: 2583 LOG(FATAL) << "Unexpected operation type " << 
op->GetResultType();
  }
}

// Emits code for Shl/Shr/UShr. Shift counts are masked to the Java-mandated
// ranges (0-31 for int, 0-63 for long) since ARM does not mask them itself.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // NOTE(review): this masks the count in place, clobbering the input
        // register second_reg — presumably safe under the current location
        // constraints; confirm second has no later uses.
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // Shift the high part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}

void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}

// Allocation is a runtime call: type index in arg 0, current method in arg 1,
// result in R0.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}

void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(R0));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}

// Array allocation is a runtime call: type index in arg 0, length in arg 1,
// current method in arg 2, result in R0.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}

// Parameters live where the calling convention put them; stack-passed
// parameters are addressed relative to this method's frame, hence the
// frame-size adjustment.
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderARM::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      // Bitwise-not each half of the register pair independently.
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}

void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  // XOR with 1 flips a boolean — assumes the input is already 0 or 1.
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}

void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}

void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  // Materializes the comparison as -1, 0 or 1 in `out`.
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  NearLabel less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // High words decide with a signed compare; only when they are equal does
      // the unsigned low-word compare (emitted below) decide the result.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // Unordered (NaN) result: gt-bias compares go to `greater`, others to `less`.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Flags set by the `cmp`/`vmstat` above select the final value of `out`.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}

void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  // No code is ever generated for a phi; reaching here is a bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
  // TODO (ported from quick): revisit Arm barrier kinds
  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
  switch (kind) {
    case MemBarrierKind::kAnyStore:
    case MemBarrierKind::kLoadAny:
    case MemBarrierKind::kAnyAny: {
      flavor = DmbOptions::ISH;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      flavor = DmbOptions::ISHST;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ dmb(flavor);
}

void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  // ldrexd takes a register-only address, so a non-zero offset is folded into
  // IP first. `out_lo` doubles as scratch for the offset — safe because it is
  // overwritten by the load below.
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}

void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  // 64-bit atomic store via an ldrexd/strexd retry loop: strexd writes 0 to
  // `temp1` on success and non-zero if the exclusive monitor was lost, in
  // which case we branch back to `fail` and retry.
  NearLabel fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}

void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  // A volatile wide store on a CPU without atomic ldrd/strd goes through the
  // ldrexd/strexd sequence, which needs extra temporaries.
  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}

void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  // Volatile stores are bracketed by barriers: any-store before the store and
  // any-any after it (emitted at the end of this function).
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Copy the double into a core register pair so it can go through the
        // ldrexd/strexd sequence.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}

void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // Volatile 64-bit load without single-copy-atomic ldrd: use ldrexd.
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into a core register pair, then move to the FP pair.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  // Volatile loads are followed by a load-any barrier.
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}

void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}

void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}

void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  // Dereference the object at offset 0: a null `obj` faults here, and the PC
  // recorded below identifies this instruction — presumably the runtime's
  // fault handler converts the fault into a NullPointerException.
  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  // Branch to the throwing slow path when the reference compares equal to 0.
  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
}

void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  // For every element type: a constant index is folded into the load offset,
  // while a register index is scaled and added into IP first.
  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();

  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
  bool needs_runtime_call = instruction->NeedsTypeCheck();

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    // Reference stores that need a type check go through the runtime, so the
    // inputs are pinned to the runtime calling convention registers.
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(value_type)) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }

    if (needs_write_barrier) {
      // Temporary registers for the write barrier.
      locations->AddTemp(Location::RequiresRegister());
      locations->AddTemp(Location::RequiresRegister());
    }
  }
}

void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  // Mirrors VisitArrayGet: constant indexes fold into the store offset,
  // register indexes are scaled into IP.
  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        // The entrypoint performs the type check and the store.
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
3572 if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) { 3573 codegen_->MaybeRecordImplicitNullCheck(instruction); 3574 } 3575} 3576 3577void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) { 3578 LocationSummary* locations = 3579 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 3580 locations->SetInAt(0, Location::RequiresRegister()); 3581 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 3582} 3583 3584void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) { 3585 LocationSummary* locations = instruction->GetLocations(); 3586 uint32_t offset = mirror::Array::LengthOffset().Uint32Value(); 3587 Register obj = locations->InAt(0).AsRegister<Register>(); 3588 Register out = locations->Out().AsRegister<Register>(); 3589 __ LoadFromOffset(kLoadWord, out, obj, offset); 3590 codegen_->MaybeRecordImplicitNullCheck(instruction); 3591} 3592 3593void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) { 3594 LocationSummary* locations = 3595 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 3596 locations->SetInAt(0, Location::RequiresRegister()); 3597 locations->SetInAt(1, Location::RequiresRegister()); 3598 if (instruction->HasUses()) { 3599 locations->SetOut(Location::SameAsFirstInput()); 3600 } 3601} 3602 3603void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) { 3604 LocationSummary* locations = instruction->GetLocations(); 3605 SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM( 3606 instruction, locations->InAt(0), locations->InAt(1)); 3607 codegen_->AddSlowPath(slow_path); 3608 3609 Register index = locations->InAt(0).AsRegister<Register>(); 3610 Register length = locations->InAt(1).AsRegister<Register>(); 3611 3612 __ cmp(index, ShifterOperand(length)); 3613 __ b(slow_path->GetEntryLabel(), CS); 3614} 3615 3616void 
// Marks the card covering `object` as dirty so the GC's remembered set sees
// the reference store.  When `can_be_null` is set, storing null skips the
// marking entirely.  `temp` and `card` are caller-provided scratch registers.
CodeGeneratorARM::MarkGCCard(Register temp,
                             Register card,
                             Register object,
                             Register value,
                             bool can_be_null) {
  NearLabel is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // The byte written is the low byte of the card-table base itself.
  // NOTE(review): this relies on the runtime aligning the card table so that
  // base's low byte equals the dirty-card value — confirm against
  // gc::accounting::CardTable.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}

void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  // Parallel moves are materialized by the move resolver, never given
  // locations of their own.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

// Emits a suspend check unless an adjacent construct (loop back edge, or the
// entry block's goto) is already responsible for it.
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

// Tests the thread's 16-bit flags word; a non-zero value routes to the
// (shared, lazily created) suspend-check slow path.  With a `successor`
// (back-edge case) control falls through to the successor on the fast path.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    // First emission for this check: create and register the slow path so a
    // later emission of the same HSuspendCheck reuses it.
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}

ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  return codegen_->GetAssembler();
}

// Materializes a single resolved parallel move.  Sources can be core/FPU
// registers, register pairs, stack slots (single or double) or constants;
// IP and DTMP serve as scratch for memory-to-memory cases.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      // Stack-to-stack: bounce through IP.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsDoubleStackSlot()) {
      // 64-bit stack-to-stack: bounce through DTMP (D31 is never allocated).
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Spill the raw bit pattern as two word stores.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}

// Swaps a core register with a stack slot, using IP as the temporary.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}

// Swaps two stack slots.  Needs a second scratch register besides IP; if the
// scratch had to be spilled, all SP-relative offsets shift by one word.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}

// Swaps the contents of two locations of the same width.  DTMP (64-bit) and
// IP (32-bit) are the scratch registers; pairs are shuttled through DTMP via
// vmovdrr/vmovrrd so no extra core register is needed.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Park the source pair in DTMP while the destination pair is copied over.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    // `low_reg + 1` is the pair's high half per ExpectedPairLayout.
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // 64-bit slot swap as two independent word swaps.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}

void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}

void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}

void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
  // A class that may need resolution/initialization takes the slow-path call
  // kind; the referrer's own class never does.
  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

// Loads a java.lang.Class: either the current method's declaring class, or a
// type looked up through the dex cache with a slow path for the unresolved
// (null cache entry) case and, if required, the class-initialization check.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    SlowPathCodeARM* slow_path = new
(GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    // A null dex-cache entry means the type is unresolved: take the slow path.
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}

// Branches to `slow_path` unless the class in `class_reg` is initialized.
// The signed LT test relies on the Class::Status enum ordering every
// not-yet-initialized state below kStatusInitialized.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

// Loads a String through current method -> declaring class -> dex-cache
// strings array; a null cache entry diverts to the resolution slow path.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

// Fetches the pending exception from the Thread and clears the slot so the
// exception is considered delivered.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}

void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}

void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
  // A final class needs only an exact class compare; otherwise a slow path
  // performs the full subtype check.
  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
      ? LocationSummary::kNoCall
      : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The out register is used as a temporary, so it overlaps with the inputs.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

// Computes `obj instanceof cls` into `out` (0 or 1).  `out` temporarily
// holds obj's class during the comparison.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  NearLabel done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // `zero` is only ever branched to in the two cases guarded here; binding it
  // unconditionally would emit a dead store of 0.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}

void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

// Emits a checked cast: exact class match falls through, any mismatch goes to
// the TypeCheck slow path (which performs the full check and may throw).
// Null objects always pass.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  NearLabel done;
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &done);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
  if (instruction->MustDoNullCheck()) {
    __ Bind(&done);
  }
}

void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

// monitorenter/monitorexit are implemented entirely by runtime entrypoints.
void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
      instruction,
      instruction->GetDexPc(),
      nullptr);
}

void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }

// Shared location building for And/Or/Xor (int and long only).
void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
         || instruction->GetResultType() == Primitive::kPrimLong);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  HandleBitwiseOperation(instruction);
}

// Shared codegen for And/Or/Xor.  Longs are computed as two independent
// 32-bit operations on the low and high halves of the register pairs.
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations = instruction->GetLocations();

  if (instruction->GetResultType() == Primitive::kPrimInt) {
    Register first = locations->InAt(0).AsRegister<Register>();
    Register second = locations->InAt(1).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();
    if (instruction->IsAnd()) {
      __ and_(out, first, ShifterOperand(second));
    } else if (instruction->IsOr()) {
      __ orr(out, first, ShifterOperand(second));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out, first, ShifterOperand(second));
    }
  } else {
    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
    Location first = locations->InAt(0);
    Location second = locations->InAt(1);
    Location out = locations->Out();
    if (instruction->IsAnd()) {
      __ and_(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ and_(out.AsRegisterPairHigh<Register>(),
              first.AsRegisterPairHigh<Register>(),
              ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else if (instruction->IsOr()) {
      __ orr(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ orr(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ eor(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    }
  }
}

// Emits the call sequence for a static or direct invoke: either the special
// String.<init> entrypoint, a recursive call to this method's own frame entry,
// or a lookup through the dex cache of resolved methods.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
  DCHECK_EQ(temp, kArtMethodRegister);

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, temp,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else {
    // temp = method;
    LoadCurrentMethod(temp);
    if (!invoke->IsRecursive()) {
      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
      // temp = temp[index_in_cache]
      __ LoadFromOffset(
          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
      // LR = temp[offset_of_quick_compiled_code]
      __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
    } else {
      // Direct self-call: branch to our own frame entry, skipping the lookup.
      __ bl(GetFrameEntryLabel());
    }
  }

  DCHECK(!IsLeafMethod());
}

void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

}  // namespace arm
}  // namespace art