code_generator_arm64.cc revision 542361f6e9ff05e3ca1f56c94c88bc3efeddd9c4
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

using namespace vixl;  // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;

static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
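  // Every return value lands in register 0 of the matching bank (w0/x0 for
  // core types, s0/d0 for floating point), mirroring the AAPCS64 slot-0
  // return convention used by native code.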
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const FPRegister kRuntimeParameterFpuRegisters[] = { d0, d1, d2, d3, d4, d5, d6, d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength) {}

  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
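    // pThrowArrayBounds expects (index, length) in the first two runtime
    // argument registers; both values are live here, so the two moves must be
    // resolved as a unit in case index or length already sits in one of those
    // registers.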
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    codegen->RestoreLiveRegisters(instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ?
      2 : 1;
  return next_location;
}

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    (1 << LR),
                    0,
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(LR));
}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::RestoreScratch(int reg) {
  __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void ParallelMoveResolverARM64::SpillScratch(int reg) {
  __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(GetVIXLAssembler());
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
    __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  int frame_size = GetFrameSize();
  __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
  __ PokeCPURegList(GetFramePreservedRegisters(), frame_size - FrameEntrySpillSize());

  // Stack layout:
  //   sp[frame_size - 8]        : lr.
  //   ...                       : other preserved registers.
  //   sp[frame_size - regs_size]: first preserved register.
  //   ...                       : reserved frame space.
  //   sp[0]                     : current method.
}

void CodeGeneratorARM64::GenerateFrameExit() {
  int frame_size = GetFrameSize();
  __ PeekCPURegList(GetFramePreservedRegisters(), frame_size - FrameEntrySpillSize());
  __ Drop(frame_size);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
    int64_t value = instruction->IsIntConstant() ?
        instruction->AsIntConstant()->GetValue()
        : instruction->AsLongConstant()->GetValue();
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();  // Index within the CardTable - 32bit.
  vixl::Label done;
  __ Cbz(value, &done);
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  __ Bind(&done);
}

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // Block reserved registers:
  //   ip0 (VIXL temporary)
  //   ip1 (VIXL temporary)
  //   tr
  //   lr
  // sp is not part of the allocatable registers, so we don't need to block it.
  // TODO: Avoid blocking callee-saved registers, and instead preserve them
  // where necessary.
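  // Note: ip0/ip1 are covered by vixl_reserved_core_registers; VIXL hands
  // them out through UseScratchRegisterScope, so the register allocator must
  // never see them as free.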
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  reserved_core_registers.Combine(quick_callee_saved_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }
  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant() || constant->IsLongConstant()) {
    __ Mov(Register(destination),
           constant->IsIntConstant() ?
               constant->AsIntConstant()->GetValue()
               : constant->AsLongConstant()->GetValue());
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}

static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant() || src_cst->IsFloatConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ?
              Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    Register r1 = XRegisterFrom(loc1);
    Register r2 = XRegisterFrom(loc2);
    Register tmp = temps.AcquireSameSizeAs(r1);
    __ Mov(tmp, r2);
    __ Mov(r2, r1);
    __ Mov(r1, tmp);
  } else if (is_fp_reg2 && is_fp_reg1) {
    FPRegister r1 = DRegisterFrom(loc1);
    FPRegister r2 = DRegisterFrom(loc2);
    FPRegister tmp = temps.AcquireSameSizeAs(r1);
    __ Fmov(tmp, r2);
    __ Fmov(r2, r1);
    __ Fmov(r1, tmp);
  } else if (is_slot1 != is_slot2) {
    MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
    Location reg_loc = is_slot1 ? loc2 : loc1;
    CPURegister reg, tmp;
    if (reg_loc.IsFpuRegister()) {
      reg = DRegisterFrom(reg_loc);
      tmp = temps.AcquireD();
    } else {
      reg = XRegisterFrom(reg_loc);
      tmp = temps.AcquireX();
    }
    __ Ldr(tmp, mem);
    __ Str(reg, mem);
    if (reg_loc.IsFpuRegister()) {
      __ Fmov(FPRegister(reg), FPRegister(tmp));
    } else {
      __ Mov(Register(reg), Register(tmp));
    }
  } else if (is_slot1 && is_slot2) {
    MemOperand mem1 = StackOperandFrom(loc1);
    MemOperand mem2 = StackOperandFrom(loc2);
    Register tmp1 = loc1.IsStackSlot() ?
        temps.AcquireW() : temps.AcquireX();
    Register tmp2 = temps.AcquireSameSizeAs(tmp1);
    __ Ldr(tmp1, mem1);
    __ Ldr(tmp2, mem2);
    __ Str(tmp1, mem2);
    __ Str(tmp2, mem1);
  } else {
    LOG(FATAL) << "Unimplemented";
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = dst.Is64Bits() ?
          temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc) {
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  if (instruction != nullptr) {
    RecordPcInfo(instruction, dex_pc);
    DCHECK(instruction->IsSuspendCheck()
           || instruction->IsBoundsCheck()
           || instruction->IsNullCheck()
           || instruction->IsDivZeroCheck()
           || !IsLeafMethod());
  }
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  if (kUseAcquireRelease) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
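    // Ldar only accepts a plain [base] addressing mode, so the field address
    // (class_reg + status_offset) is materialized into a temp first; that is
    // what the TODO above refers to.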
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
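  // A Brk payload in the 0x9xx range seen in a debugger thus maps straight
  // back to the IR name that generated it.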
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
    UNUSED(instr);                                                                     \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                                \
  }                                                                                    \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (instr->IsAdd()) {
        __ Add(dst, lhs, rhs);
      } else if (instr->IsAnd()) {
        __ And(dst, lhs, rhs);
      } else if (instr->IsOr()) {
        __ Orr(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Sub(dst, lhs, rhs);
      } else {
        DCHECK(instr->IsXor());
        __ Eor(dst, lhs, rhs);
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FPRegister dst = OutputFPRegister(instr);
      FPRegister lhs = InputFPRegisterAt(instr, 0);
      FPRegister rhs = InputFPRegisterAt(instr, 1);
      if (instr->IsAdd()) {
        __ Fadd(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Fsub(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0,
                         Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  Primitive::Type type = instr->GetType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (rhs.IsImmediate()) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
        if (instr->IsShl()) {
          __ Lsl(dst, lhs, shift_value);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, shift_value);
        } else {
          __ Lsr(dst, lhs, shift_value);
        }
      } else {
        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();

        if (instr->IsShl()) {
          __ Lsl(dst, lhs, rhs_reg);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, rhs_reg);
        } else {
          __ Lsr(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  Register obj = InputRegisterAt(instruction, 0);
  Location index = locations->InAt(1);
  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
  MemOperand source = HeapOperand(obj);
  UseScratchRegisterScope temps(GetVIXLAssembler());

  if (index.IsConstant()) {
    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
    source = HeapOperand(obj, offset);
  } else {
    Register temp = temps.AcquireSameSizeAs(obj);
    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
    source = HeapOperand(temp, offset);
  }

  codegen_->Load(type, OutputCPURegister(instruction), source);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
  __ Ldr(OutputRegister(instruction),
         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool is_object = value_type == Primitive::kPrimNot;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (is_object) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    locations->SetInAt(2, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  if (value_type == Primitive::kPrimNot) {
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
  } else {
    LocationSummary* locations = instruction->GetLocations();
    Register obj = InputRegisterAt(instruction, 0);
    CPURegister value = InputCPURegisterAt(instruction, 2);
    Location index = locations->InAt(1);
    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
    MemOperand destination = HeapOperand(obj);
    UseScratchRegisterScope temps(GetVIXLAssembler());

    if (index.IsConstant()) {
      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
      destination = HeapOperand(obj, offset);
    } else {
      Register temp = temps.AcquireSameSizeAs(obj);
      Register index_reg = InputRegisterAt(instruction, 1);
      __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
      destination = HeapOperand(temp, offset);
    }

    codegen_->Store(value_type, value, destination);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  __ Cmp(InputRegisterAt(instruction, 0),
         InputOperandAt(instruction, 1));
  __ B(slow_path->GetEntryLabel(), hs);
}

void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));

  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Cbz(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(obj_cls, cls);
  __ B(ne, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
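  // Reuse the class-loading slow path with do_clinit == true, so an
  // uninitialized class is routed to pInitializeStaticStorage.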
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}

void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  Primitive::Type in_type = compare->InputAt(0)->GetType();
  switch (in_type) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  //  0 if: left == right
  //  1 if: left  > right
  // -1 if: left  < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      Register result = OutputRegister(compare);
      Register left = InputRegisterAt(compare, 0);
      Operand right = InputOperandAt(compare, 1);

      __ Cmp(left, right);
      __ Cset(result, ne);
      __ Cneg(result, result, lt);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      Register result = OutputRegister(compare);
      FPRegister left = InputFPRegisterAt(compare, 0);
      FPRegister right = InputFPRegisterAt(compare, 1);

      __ Fcmp(left, right);
      if (compare->IsGtBias()) {
        __ Cset(result, ne);
      } else {
        __ Csetm(result, ne);
      }
      __ Cneg(result, result, compare->IsGtBias() ?
          mi : gt);
      break;
    }
    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  Register lhs = InputRegisterAt(instruction, 0);
  Operand rhs = InputOperandAt(instruction, 1);
  Register res = RegisterFrom(locations->Out(), instruction->GetType());
  Condition cond = ARM64Condition(instruction->GetCondition());

  __ Cmp(lhs, rhs);
  __ Cset(res, cond);
}

#define FOR_EACH_CONDITION_INSTRUCTION(M) \
  M(Equal)                                \
  M(NotEqual)                             \
  M(LessThan)                             \
  M(LessThanOrEqual)                      \
  M(GreaterThan)                          \
  M(GreaterThanOrEqual)
#define DEFINE_CONDITION_VISITORS(Name)                                                  \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION

void LocationsBuilderARM64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  Primitive::Type type = div->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck*
                                                          instruction) {
  SlowPathCodeARM64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = Int64ConstantFrom(value);
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
  UNUSED(exit);
  if (kIsDebugBuild) {
    down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
    __ Brk(__LINE__);  // TODO: Introduce special markers for such code locations.
  }
}

void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
1565} 1566 1567void LocationsBuilderARM64::VisitGoto(HGoto* got) { 1568 got->SetLocations(nullptr); 1569} 1570 1571void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) { 1572 HBasicBlock* successor = got->GetSuccessor(); 1573 DCHECK(!successor->IsExitBlock()); 1574 HBasicBlock* block = got->GetBlock(); 1575 HInstruction* previous = got->GetPrevious(); 1576 HLoopInformation* info = block->GetLoopInformation(); 1577 1578 if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) { 1579 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck()); 1580 GenerateSuspendCheck(info->GetSuspendCheck(), successor); 1581 return; 1582 } 1583 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { 1584 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); 1585 } 1586 if (!codegen_->GoesToNextBlock(block, successor)) { 1587 __ B(codegen_->GetLabelOf(successor)); 1588 } 1589} 1590 1591void LocationsBuilderARM64::VisitIf(HIf* if_instr) { 1592 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr); 1593 HInstruction* cond = if_instr->InputAt(0); 1594 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) { 1595 locations->SetInAt(0, Location::RequiresRegister()); 1596 } 1597} 1598 1599void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) { 1600 HInstruction* cond = if_instr->InputAt(0); 1601 HCondition* condition = cond->AsCondition(); 1602 vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor()); 1603 vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor()); 1604 1605 if (cond->IsIntConstant()) { 1606 int32_t cond_value = cond->AsIntConstant()->GetValue(); 1607 if (cond_value == 1) { 1608 if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) { 1609 __ B(true_target); 1610 } 1611 return; 1612 } else { 1613 DCHECK_EQ(cond_value, 0); 1614 } 1615 } else if (!cond->IsCondition() || condition->NeedsMaterialization()) { 1616 // The condition instruction has been materialized, compare the output to 0. 1617 Location cond_val = if_instr->GetLocations()->InAt(0); 1618 DCHECK(cond_val.IsRegister()); 1619 __ Cbnz(InputRegisterAt(if_instr, 0), true_target); 1620 } else { 1621 // The condition instruction has not been materialized, use its inputs as 1622 // the comparison and its condition as the branch condition. 
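// For example, `if (x == 0)` can branch with a single `cbz w0, <true_target>`
// instead of a `cmp w0, #0` / `b.eq <true_target>` pair; the eq/ne special
// case below performs exactly that folding (the register w0 is illustrative only).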
1623 Register lhs = InputRegisterAt(condition, 0); 1624 Operand rhs = InputOperandAt(condition, 1); 1625 Condition arm64_cond = ARM64Condition(condition->GetCondition()); 1626 if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) { 1627 if (arm64_cond == eq) { 1628 __ Cbz(lhs, true_target); 1629 } else { 1630 __ Cbnz(lhs, true_target); 1631 } 1632 } else { 1633 __ Cmp(lhs, rhs); 1634 __ B(arm64_cond, true_target); 1635 } 1636 } 1637 if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) { 1638 __ B(false_target); 1639 } 1640} 1641 1642void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) { 1643 LocationSummary* locations = 1644 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 1645 locations->SetInAt(0, Location::RequiresRegister()); 1646 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 1647} 1648 1649void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) { 1650 MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset()); 1651 1652 if (instruction->IsVolatile()) { 1653 if (kUseAcquireRelease) { 1654 // NB: LoadAcquire will record the pc info if needed. 1655 codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field); 1656 } else { 1657 codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field); 1658 codegen_->MaybeRecordImplicitNullCheck(instruction); 1659 // For IRIW sequential consistency kLoadAny is not sufficient. 1660 GenerateMemoryBarrier(MemBarrierKind::kAnyAny); 1661 } 1662 } else { 1663 codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field); 1664 codegen_->MaybeRecordImplicitNullCheck(instruction); 1665 } 1666} 1667 1668void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) { 1669 LocationSummary* locations = 1670 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 1671 locations->SetInAt(0, Location::RequiresRegister()); 1672 locations->SetInAt(1, Location::RequiresRegister()); 1673} 1674 1675void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) { 1676 Register obj = InputRegisterAt(instruction, 0); 1677 CPURegister value = InputCPURegisterAt(instruction, 1); 1678 Offset offset = instruction->GetFieldOffset(); 1679 Primitive::Type field_type = instruction->GetFieldType(); 1680 1681 if (instruction->IsVolatile()) { 1682 if (kUseAcquireRelease) { 1683 codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset)); 1684 codegen_->MaybeRecordImplicitNullCheck(instruction); 1685 } else { 1686 GenerateMemoryBarrier(MemBarrierKind::kAnyStore); 1687 codegen_->Store(field_type, value, HeapOperand(obj, offset)); 1688 codegen_->MaybeRecordImplicitNullCheck(instruction); 1689 GenerateMemoryBarrier(MemBarrierKind::kAnyAny); 1690 } 1691 } else { 1692 codegen_->Store(field_type, value, HeapOperand(obj, offset)); 1693 codegen_->MaybeRecordImplicitNullCheck(instruction); 1694 } 1695 1696 if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { 1697 codegen_->MarkGCCard(obj, Register(value)); 1698 } 1699} 1700 1701void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) { 1702 LocationSummary::CallKind call_kind = 1703 instruction->IsClassFinal() ? 
LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath; 1704 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); 1705 locations->SetInAt(0, Location::RequiresRegister()); 1706 locations->SetInAt(1, Location::RequiresRegister()); 1707 // The output overlaps the inputs: `out` holds the loaded class while both inputs are still live. 1708 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); 1709} 1710 1711void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { 1712 LocationSummary* locations = instruction->GetLocations(); 1713 Register obj = InputRegisterAt(instruction, 0); 1714 Register cls = InputRegisterAt(instruction, 1); 1715 Register out = OutputRegister(instruction); 1716 1717 vixl::Label done; 1718 1719 // Return 0 if `obj` is null. 1720 // TODO: Avoid this check if we know `obj` is not null. 1721 __ Mov(out, 0); 1722 __ Cbz(obj, &done); 1723 1724 // Compare the class of `obj` with `cls`. 1725 __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset())); 1726 __ Cmp(out, cls); 1727 if (instruction->IsClassFinal()) { 1728 // Classes must be equal for the instanceof to succeed. 1729 __ Cset(out, eq); 1730 } else { 1731 // If the classes are not equal, we go into a slow path. 1732 DCHECK(locations->OnlyCallsOnSlowPath()); 1733 SlowPathCodeARM64* slow_path = 1734 new (GetGraph()->GetArena()) TypeCheckSlowPathARM64( 1735 instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc()); 1736 codegen_->AddSlowPath(slow_path); 1737 __ B(ne, slow_path->GetEntryLabel()); 1738 __ Mov(out, 1); 1739 __ Bind(slow_path->GetExitLabel()); 1740 } 1741 1742 __ Bind(&done); 1743} 1744 1745void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) { 1746 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); 1747 locations->SetOut(Location::ConstantLocation(constant)); 1748} 1749 1750void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) { 1751 // Will be generated at use site. 1752 UNUSED(constant); 1753} 1754 1755void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) { 1756 LocationSummary* locations = 1757 new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall); 1758 locations->AddTemp(LocationFrom(x0)); 1759 1760 InvokeDexCallingConventionVisitor calling_convention_visitor; 1761 for (size_t i = 0; i < invoke->InputCount(); i++) { 1762 HInstruction* input = invoke->InputAt(i); 1763 locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType())); 1764 } 1765 1766 Primitive::Type return_type = invoke->GetType(); 1767 if (return_type != Primitive::kPrimVoid) { 1768 locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type)); 1769 } 1770} 1771 1772void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) { 1773 HandleInvoke(invoke); 1774} 1775 1776void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) { 1777 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
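// The dispatch sequence emitted below is roughly (register names illustrative):
// load the receiver's class, load the IMT entry at `imt_index % kImtSize` from
// the class's embedded IMT, load that method's quick entry point into lr, then
// `blr lr`, with the interface method's dex method index passed in ip1 as the
// hidden argument for the conflict trampoline.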
1778 Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0)); 1779 uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + 1780 (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); 1781 Location receiver = invoke->GetLocations()->InAt(0); 1782 Offset class_offset = mirror::Object::ClassOffset(); 1783 Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize); 1784 1785 // The register ip1 is required to be used for the hidden argument in 1786 // art_quick_imt_conflict_trampoline, so prevent VIXL from using it. 1787 UseScratchRegisterScope scratch_scope(GetVIXLAssembler()); 1788 scratch_scope.Exclude(ip1); 1789 __ Mov(ip1, invoke->GetDexMethodIndex()); 1790 1791 // temp = object->GetClass(); 1792 if (receiver.IsStackSlot()) { 1793 __ Ldr(temp, StackOperandFrom(receiver)); 1794 __ Ldr(temp, HeapOperand(temp, class_offset)); 1795 } else { 1796 __ Ldr(temp, HeapOperandFrom(receiver, class_offset)); 1797 } 1798 codegen_->MaybeRecordImplicitNullCheck(invoke); 1799 // temp = temp->GetImtEntryAt(method_offset); 1800 __ Ldr(temp, HeapOperand(temp, method_offset)); 1801 // lr = temp->GetEntryPoint(); 1802 __ Ldr(lr, HeapOperand(temp, entry_point)); 1803 // lr(); 1804 __ Blr(lr); 1805 DCHECK(!codegen_->IsLeafMethod()); 1806 codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); 1807} 1808 1809void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { 1810 IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena()); 1811 if (intrinsic.TryDispatch(invoke)) { 1812 return; 1813 } 1814 1815 HandleInvoke(invoke); 1816} 1817 1818void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { 1819 IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena()); 1820 if (intrinsic.TryDispatch(invoke)) { 1821 return; 1822 } 1823 1824 HandleInvoke(invoke); 1825} 1826 1827static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) { 1828 if (invoke->GetLocations()->Intrinsified()) { 1829 IntrinsicCodeGeneratorARM64 intrinsic(codegen); 1830 intrinsic.Dispatch(invoke); 1831 return true; 1832 } 1833 return false; 1834} 1835 1836void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) { 1837 // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention. 1838 DCHECK(temp.Is(kArtMethodRegister)); 1839 size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() + 1840 invoke->GetDexMethodIndex() * kHeapRefSize; 1841 1842 // TODO: Implement all kinds of calls: 1843 // 1) boot -> boot 1844 // 2) app -> boot 1845 // 3) app -> app 1846 // 1847 // Currently we implement the app -> app logic, which looks up in the resolve cache. 
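// Note that the recursive case below needs no dex cache lookup at all: the
// callee is the method being compiled, so a single `bl` to our own frame
// entry label suffices.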
1848 1849 if (!invoke->IsRecursive()) { 1850 // temp = method; 1851 LoadCurrentMethod(temp); 1852 // temp = temp->dex_cache_resolved_methods_; 1853 __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset())); 1854 // temp = temp[index_in_cache]; 1855 __ Ldr(temp, HeapOperand(temp, index_in_cache)); 1856 // lr = temp->entry_point_from_quick_compiled_code_; 1857 __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( 1858 kArm64WordSize))); 1859 // lr(); 1860 __ Blr(lr); 1861 } else { 1862 __ Bl(&frame_entry_label_); 1863 } 1864 1865 RecordPcInfo(invoke, invoke->GetDexPc()); 1866 DCHECK(!IsLeafMethod()); 1867} 1868 1869void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { 1870 if (TryGenerateIntrinsicCode(invoke, codegen_)) { 1871 return; 1872 } 1873 1874 Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0)); 1875 codegen_->GenerateStaticOrDirectCall(invoke, temp); 1876} 1877 1878void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { 1879 if (TryGenerateIntrinsicCode(invoke, codegen_)) { 1880 return; 1881 } 1882 1883 LocationSummary* locations = invoke->GetLocations(); 1884 Location receiver = locations->InAt(0); 1885 Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0)); 1886 size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() + 1887 invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry); 1888 Offset class_offset = mirror::Object::ClassOffset(); 1889 Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize); 1890 1891 // temp = object->GetClass(); 1892 if (receiver.IsStackSlot()) { 1893 __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex())); 1894 __ Ldr(temp, HeapOperand(temp, class_offset)); 1895 } else { 1896 DCHECK(receiver.IsRegister()); 1897 __ Ldr(temp, HeapOperandFrom(receiver, class_offset)); 1898 } 1899 codegen_->MaybeRecordImplicitNullCheck(invoke); 1900 // temp = temp->GetMethodAt(method_offset); 1901 __ Ldr(temp, HeapOperand(temp, method_offset)); 1902 // lr = temp->GetEntryPoint(); 1903 __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue())); 1904 // lr(); 1905 __ Blr(lr); 1906 DCHECK(!codegen_->IsLeafMethod()); 1907 codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); 1908} 1909 1910void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { 1911 LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? 
LocationSummary::kCallOnSlowPath 1912 : LocationSummary::kNoCall; 1913 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); 1914 locations->SetOut(Location::RequiresRegister()); 1915} 1916 1917void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) { 1918 Register out = OutputRegister(cls); 1919 if (cls->IsReferrersClass()) { 1920 DCHECK(!cls->CanCallRuntime()); 1921 DCHECK(!cls->MustGenerateClinitCheck()); 1922 codegen_->LoadCurrentMethod(out); 1923 __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset())); 1924 } else { 1925 DCHECK(cls->CanCallRuntime()); 1926 codegen_->LoadCurrentMethod(out); 1927 __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset())); 1928 __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); 1929 1930 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64( 1931 cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); 1932 codegen_->AddSlowPath(slow_path); 1933 __ Cbz(out, slow_path->GetEntryLabel()); 1934 if (cls->MustGenerateClinitCheck()) { 1935 GenerateClassInitializationCheck(slow_path, out); 1936 } else { 1937 __ Bind(slow_path->GetExitLabel()); 1938 } 1939 } 1940} 1941 1942void LocationsBuilderARM64::VisitLoadException(HLoadException* load) { 1943 LocationSummary* locations = 1944 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); 1945 locations->SetOut(Location::RequiresRegister()); 1946} 1947 1948void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) { 1949 MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value()); 1950 __ Ldr(OutputRegister(instruction), exception); 1951 __ Str(wzr, exception); 1952} 1953 1954void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) { 1955 load->SetLocations(nullptr); 1956} 1957 1958void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) { 1959 // Nothing to do, this is driven by the code generator. 
1960 UNUSED(load); 1961} 1962 1963void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { 1964 LocationSummary* locations = 1965 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath); 1966 locations->SetOut(Location::RequiresRegister()); 1967} 1968 1969void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { 1970 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load); 1971 codegen_->AddSlowPath(slow_path); 1972 1973 Register out = OutputRegister(load); 1974 codegen_->LoadCurrentMethod(out); 1975 __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset())); 1976 __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset())); 1977 __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); 1978 __ Cbz(out, slow_path->GetEntryLabel()); 1979 __ Bind(slow_path->GetExitLabel()); 1980} 1981 1982void LocationsBuilderARM64::VisitLocal(HLocal* local) { 1983 local->SetLocations(nullptr); 1984} 1985 1986void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) { 1987 DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock()); 1988} 1989 1990void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) { 1991 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant); 1992 locations->SetOut(Location::ConstantLocation(constant)); 1993} 1994 1995void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) { 1996 // Will be generated at use site. 1997 UNUSED(constant); 1998} 1999 2000void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) { 2001 LocationSummary* locations = 2002 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 2003 InvokeRuntimeCallingConvention calling_convention; 2004 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); 2005} 2006 2007void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) { 2008 codegen_->InvokeRuntime(instruction->IsEnter() 2009 ? 
QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject), 2010 instruction, 2011 instruction->GetDexPc()); 2012 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>(); 2013} 2014 2015void LocationsBuilderARM64::VisitMul(HMul* mul) { 2016 LocationSummary* locations = 2017 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall); 2018 switch (mul->GetResultType()) { 2019 case Primitive::kPrimInt: 2020 case Primitive::kPrimLong: 2021 locations->SetInAt(0, Location::RequiresRegister()); 2022 locations->SetInAt(1, Location::RequiresRegister()); 2023 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2024 break; 2025 2026 case Primitive::kPrimFloat: 2027 case Primitive::kPrimDouble: 2028 locations->SetInAt(0, Location::RequiresFpuRegister()); 2029 locations->SetInAt(1, Location::RequiresFpuRegister()); 2030 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 2031 break; 2032 2033 default: 2034 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); 2035 } 2036} 2037 2038void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) { 2039 switch (mul->GetResultType()) { 2040 case Primitive::kPrimInt: 2041 case Primitive::kPrimLong: 2042 __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1)); 2043 break; 2044 2045 case Primitive::kPrimFloat: 2046 case Primitive::kPrimDouble: 2047 __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1)); 2048 break; 2049 2050 default: 2051 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType(); 2052 } 2053} 2054 2055void LocationsBuilderARM64::VisitNeg(HNeg* neg) { 2056 LocationSummary* locations = 2057 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall); 2058 switch (neg->GetResultType()) { 2059 case Primitive::kPrimInt: 2060 case Primitive::kPrimLong: 2061 locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0))); 2062 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2063 break; 2064 2065 case Primitive::kPrimFloat: 2066 case Primitive::kPrimDouble: 2067 locations->SetInAt(0, Location::RequiresFpuRegister()); 2068 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 2069 break; 2070 2071 default: 2072 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); 2073 } 2074} 2075 2076void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) { 2077 switch (neg->GetResultType()) { 2078 case Primitive::kPrimInt: 2079 case Primitive::kPrimLong: 2080 __ Neg(OutputRegister(neg), InputOperandAt(neg, 0)); 2081 break; 2082 2083 case Primitive::kPrimFloat: 2084 case Primitive::kPrimDouble: 2085 __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0)); 2086 break; 2087 2088 default: 2089 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType(); 2090 } 2091} 2092 2093void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) { 2094 LocationSummary* locations = 2095 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 2096 InvokeRuntimeCallingConvention calling_convention; 2097 locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0))); 2098 locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2))); 2099 locations->SetOut(LocationFrom(x0)); 2100 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1))); 2101 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, 2102 void*, uint32_t, int32_t, mirror::ArtMethod*>(); 2103} 2104 2105void 
InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { 2106 LocationSummary* locations = instruction->GetLocations(); 2107 InvokeRuntimeCallingConvention calling_convention; 2108 Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt); 2109 DCHECK(type_index.Is(w0)); 2110 Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot); 2111 DCHECK(current_method.Is(w2)); 2112 codegen_->LoadCurrentMethod(current_method); 2113 __ Mov(type_index, instruction->GetTypeIndex()); 2114 codegen_->InvokeRuntime( 2115 QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc()); 2116 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, 2117 void*, uint32_t, int32_t, mirror::ArtMethod*>(); 2118} 2119 2120void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { 2121 LocationSummary* locations = 2122 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 2123 InvokeRuntimeCallingConvention calling_convention; 2124 locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0))); 2125 locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1))); 2126 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); 2127 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>(); 2128} 2129 2130void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) { 2131 LocationSummary* locations = instruction->GetLocations(); 2132 Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt); 2133 DCHECK(type_index.Is(w0)); 2134 Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot); 2135 DCHECK(current_method.Is(w1)); 2136 codegen_->LoadCurrentMethod(current_method); 2137 __ Mov(type_index, instruction->GetTypeIndex()); 2138 codegen_->InvokeRuntime( 2139 QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc()); 2140 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>(); 2141} 2142 2143void LocationsBuilderARM64::VisitNot(HNot* instruction) { 2144 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); 2145 locations->SetInAt(0, Location::RequiresRegister()); 2146 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2147} 2148 2149void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) { 2150 switch (instruction->InputAt(0)->GetType()) { 2151 case Primitive::kPrimInt: 2152 case Primitive::kPrimLong: 2153 __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0)); 2154 break; 2155 2156 default: 2157 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType(); 2158 } 2159} 2160 2161void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) { 2162 LocationSummary* locations = 2163 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2164 locations->SetInAt(0, Location::RequiresRegister()); 2165 if (instruction->HasUses()) { 2166 locations->SetOut(Location::SameAsFirstInput()); 2167 } 2168} 2169 2170void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) { 2171 if (codegen_->CanMoveNullCheckToUser(instruction)) { 2172 return; 2173 } 2174 Location obj = instruction->GetLocations()->InAt(0); 2175 2176 __ Ldr(wzr, HeapOperandFrom(obj, Offset(0))); 2177 codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); 2178} 2179 2180void 
InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) { 2181 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction); 2182 codegen_->AddSlowPath(slow_path); 2183 2184 LocationSummary* locations = instruction->GetLocations(); 2185 Location obj = locations->InAt(0); 2186 2187 __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel()); 2188} 2189 2190void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) { 2191 if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) { 2192 GenerateImplicitNullCheck(instruction); 2193 } else { 2194 GenerateExplicitNullCheck(instruction); 2195 } 2196} 2197 2198void LocationsBuilderARM64::VisitOr(HOr* instruction) { 2199 HandleBinaryOp(instruction); 2200} 2201 2202void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) { 2203 HandleBinaryOp(instruction); 2204} 2205 2206void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) { 2207 LOG(FATAL) << "Unreachable"; 2208} 2209 2210void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) { 2211 codegen_->GetMoveResolver()->EmitNativeCode(instruction); 2212} 2213 2214void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) { 2215 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); 2216 Location location = parameter_visitor_.GetNextLocation(instruction->GetType()); 2217 if (location.IsStackSlot()) { 2218 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); 2219 } else if (location.IsDoubleStackSlot()) { 2220 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize()); 2221 } 2222 locations->SetOut(location); 2223} 2224 2225void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) { 2226 // Nothing to do, the parameter is already at its location. 2227 UNUSED(instruction); 2228} 2229 2230void LocationsBuilderARM64::VisitPhi(HPhi* instruction) { 2231 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); 2232 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) { 2233 locations->SetInAt(i, Location::Any()); 2234 } 2235 locations->SetOut(Location::Any()); 2236} 2237 2238void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) { 2239 UNUSED(instruction); 2240 LOG(FATAL) << "Unreachable"; 2241} 2242 2243void LocationsBuilderARM64::VisitRem(HRem* rem) { 2244 Primitive::Type type = rem->GetResultType(); 2245 LocationSummary::CallKind call_kind = 2246 Primitive::IsFloatingPointType(type) ? 
LocationSummary::kCall : LocationSummary::kNoCall; 2247 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind); 2248 2249 switch (type) { 2250 case Primitive::kPrimInt: 2251 case Primitive::kPrimLong: 2252 locations->SetInAt(0, Location::RequiresRegister()); 2253 locations->SetInAt(1, Location::RequiresRegister()); 2254 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2255 break; 2256 2257 case Primitive::kPrimFloat: 2258 case Primitive::kPrimDouble: { 2259 InvokeRuntimeCallingConvention calling_convention; 2260 locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0))); 2261 locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1))); 2262 locations->SetOut(calling_convention.GetReturnLocation(type)); 2263 2264 break; 2265 } 2266 2267 default: 2268 LOG(FATAL) << "Unexpected rem type " << type; 2269 } 2270} 2271 2272void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) { 2273 Primitive::Type type = rem->GetResultType(); 2274 2275 switch (type) { 2276 case Primitive::kPrimInt: 2277 case Primitive::kPrimLong: { 2278 UseScratchRegisterScope temps(GetVIXLAssembler()); 2279 Register dividend = InputRegisterAt(rem, 0); 2280 Register divisor = InputRegisterAt(rem, 1); 2281 Register output = OutputRegister(rem); 2282 Register temp = temps.AcquireSameSizeAs(output); 2283 2284 __ Sdiv(temp, dividend, divisor); 2285 __ Msub(output, temp, divisor, dividend); 2286 break; 2287 } 2288 2289 case Primitive::kPrimFloat: 2290 case Primitive::kPrimDouble: { 2291 int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf) 2292 : QUICK_ENTRY_POINT(pFmod); 2293 codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc()); 2294 break; 2295 } 2296 2297 default: 2298 LOG(FATAL) << "Unexpected rem type " << type; 2299 } 2300} 2301 2302void LocationsBuilderARM64::VisitReturn(HReturn* instruction) { 2303 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); 2304 Primitive::Type return_type = instruction->InputAt(0)->GetType(); 2305 locations->SetInAt(0, ARM64ReturnLocation(return_type)); 2306} 2307 2308void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) { 2309 UNUSED(instruction); 2310 codegen_->GenerateFrameExit(); 2311 __ Ret(); 2312} 2313 2314void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) { 2315 instruction->SetLocations(nullptr); 2316} 2317 2318void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) { 2319 UNUSED(instruction); 2320 codegen_->GenerateFrameExit(); 2321 __ Ret(); 2322} 2323 2324void LocationsBuilderARM64::VisitShl(HShl* shl) { 2325 HandleShift(shl); 2326} 2327 2328void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) { 2329 HandleShift(shl); 2330} 2331 2332void LocationsBuilderARM64::VisitShr(HShr* shr) { 2333 HandleShift(shr); 2334} 2335 2336void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) { 2337 HandleShift(shr); 2338} 2339 2340void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) { 2341 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store); 2342 Primitive::Type field_type = store->InputAt(1)->GetType(); 2343 switch (field_type) { 2344 case Primitive::kPrimNot: 2345 case Primitive::kPrimBoolean: 2346 case Primitive::kPrimByte: 2347 case Primitive::kPrimChar: 2348 case Primitive::kPrimShort: 2349 case Primitive::kPrimInt: 2350 case Primitive::kPrimFloat: 2351 locations->SetInAt(1, 
Location::StackSlot(codegen_->GetStackSlot(store->GetLocal()))); 2352 break; 2353 2354 case Primitive::kPrimLong: 2355 case Primitive::kPrimDouble: 2356 locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal()))); 2357 break; 2358 2359 default: 2360 LOG(FATAL) << "Unimplemented local type " << field_type; 2361 } 2362} 2363 2364void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) { 2365 UNUSED(store); 2366} 2367 2368void LocationsBuilderARM64::VisitSub(HSub* instruction) { 2369 HandleBinaryOp(instruction); 2370} 2371 2372void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) { 2373 HandleBinaryOp(instruction); 2374} 2375 2376void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) { 2377 LocationSummary* locations = 2378 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2379 locations->SetInAt(0, Location::RequiresRegister()); 2380 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2381} 2382 2383void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) { 2384 MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset()); 2385 2386 if (instruction->IsVolatile()) { 2387 if (kUseAcquireRelease) { 2388 // NB: LoadAcquire will record the pc info if needed. 2389 codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field); 2390 } else { 2391 codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field); 2392 // For IRIW sequential consistency kLoadAny is not sufficient. 2393 GenerateMemoryBarrier(MemBarrierKind::kAnyAny); 2394 } 2395 } else { 2396 codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field); 2397 } 2398} 2399 2400void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) { 2401 LocationSummary* locations = 2402 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2403 locations->SetInAt(0, Location::RequiresRegister()); 2404 locations->SetInAt(1, Location::RequiresRegister()); 2405} 2406 2407void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) { 2408 Register cls = InputRegisterAt(instruction, 0); 2409 CPURegister value = InputCPURegisterAt(instruction, 1); 2410 Offset offset = instruction->GetFieldOffset(); 2411 Primitive::Type field_type = instruction->GetFieldType(); 2412 2413 if (instruction->IsVolatile()) { 2414 if (kUseAcquireRelease) { 2415 codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset)); 2416 } else { 2417 GenerateMemoryBarrier(MemBarrierKind::kAnyStore); 2418 codegen_->Store(field_type, value, HeapOperand(cls, offset)); 2419 GenerateMemoryBarrier(MemBarrierKind::kAnyAny); 2420 } 2421 } else { 2422 codegen_->Store(field_type, value, HeapOperand(cls, offset)); 2423 } 2424 2425 if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) { 2426 codegen_->MarkGCCard(cls, Register(value)); 2427 } 2428} 2429 2430void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) { 2431 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); 2432} 2433 2434void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) { 2435 HBasicBlock* block = instruction->GetBlock(); 2436 if (block->GetLoopInformation() != nullptr) { 2437 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction); 2438 // The back edge will generate the suspend check. 
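// (See VisitGoto above: back edges with a pending suspend check emit it
// there, so emitting another one here would be redundant.)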
2439 return; 2440 } 2441 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) { 2442 // The goto will generate the suspend check. 2443 return; 2444 } 2445 GenerateSuspendCheck(instruction, nullptr); 2446} 2447 2448void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) { 2449 temp->SetLocations(nullptr); 2450} 2451 2452void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) { 2453 // Nothing to do, this is driven by the code generator. 2454 UNUSED(temp); 2455} 2456 2457void LocationsBuilderARM64::VisitThrow(HThrow* instruction) { 2458 LocationSummary* locations = 2459 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 2460 InvokeRuntimeCallingConvention calling_convention; 2461 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); 2462} 2463 2464void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) { 2465 codegen_->InvokeRuntime( 2466 QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc()); 2467 CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>(); 2468} 2469 2470void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) { 2471 LocationSummary* locations = 2472 new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall); 2473 Primitive::Type input_type = conversion->GetInputType(); 2474 Primitive::Type result_type = conversion->GetResultType(); 2475 DCHECK_NE(input_type, result_type); 2476 if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) || 2477 (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) { 2478 LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type; 2479 } 2480 2481 if (Primitive::IsFloatingPointType(input_type)) { 2482 locations->SetInAt(0, Location::RequiresFpuRegister()); 2483 } else { 2484 locations->SetInAt(0, Location::RequiresRegister()); 2485 } 2486 2487 if (Primitive::IsFloatingPointType(result_type)) { 2488 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); 2489 } else { 2490 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2491 } 2492} 2493 2494void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) { 2495 Primitive::Type result_type = conversion->GetResultType(); 2496 Primitive::Type input_type = conversion->GetInputType(); 2497 2498 DCHECK_NE(input_type, result_type); 2499 2500 if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) { 2501 int result_size = Primitive::ComponentSize(result_type); 2502 int input_size = Primitive::ComponentSize(input_type); 2503 int min_size = std::min(result_size, input_size); 2504 Register output = OutputRegister(conversion); 2505 Register source = InputRegisterAt(conversion, 0); 2506 if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) { 2507 __ Ubfx(output, source, 0, result_size * kBitsPerByte); 2508 } else if ((result_type == Primitive::kPrimChar) || 2509 ((input_type == Primitive::kPrimChar) && (result_size > input_size))) { 2510 __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte); 2511 } else { 2512 __ Sbfx(output, output.IsX() ? 
source.X() : source.W(), 0, min_size * kBitsPerByte); 2513 } 2514 } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) { 2515 __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0)); 2516 } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) { 2517 CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong); 2518 __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0)); 2519 } else if (Primitive::IsFloatingPointType(result_type) && 2520 Primitive::IsFloatingPointType(input_type)) { 2521 __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0)); 2522 } else { 2523 LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type 2524 << " to " << result_type; 2525 } 2526} 2527 2528void LocationsBuilderARM64::VisitUShr(HUShr* ushr) { 2529 HandleShift(ushr); 2530} 2531 2532void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) { 2533 HandleShift(ushr); 2534} 2535 2536void LocationsBuilderARM64::VisitXor(HXor* instruction) { 2537 HandleBinaryOp(instruction); 2538} 2539 2540void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) { 2541 HandleBinaryOp(instruction); 2542} 2543 2544#undef __ 2545#undef QUICK_ENTRY_POINT 2546 2547} // namespace arm64 2548} // namespace art 2549