code_generator_arm64.cc revision 1cc7dbabd03e0a6c09d68161417a21bd6f9df371
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"


using namespace vixl;  // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif


namespace art {

namespace arm64 {

static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

namespace {

bool IsFPType(Primitive::Type type) {
  return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
}

bool IsIntegralType(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      return true;
    default:
      return false;
  }
}

bool Is64BitType(Primitive::Type type) {
  return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
}

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::kZeroRegCode;
  }
  return code;
}

int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::kZeroRegCode) {
    return XZR;
  }
  return code;
}

Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister());
  return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
}

Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister());
  return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
}

Register RegisterFrom(Location location, Primitive::Type type) {
  DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
  return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}
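// For illustration: ART and VIXL agree on the codes for r0..r30, so the
// conversions above only remap the two special codes checked by the
// static_assert:
//   VIXLRegCodeFromART(SP)  == vixl::kSPRegInternalCode   (ART code 31)
//   VIXLRegCodeFromART(XZR) == vixl::kZeroRegCode         (ART code 32)
// RegisterFrom() then picks the view: a kPrimLong location yields the X
// (64-bit) register, every other integral type the W (32-bit) view of it.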

Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

FPRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister());
  return FPRegister::DRegFromCode(location.reg());
}

FPRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister());
  return FPRegister::SRegFromCode(location.reg());
}

FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
  DCHECK(IsFPType(type));
  return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}

FPRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
  return IsFPType(type) ? CPURegister(FPRegisterFrom(location, type))
                        : CPURegister(RegisterFrom(location, type));
}

CPURegister OutputCPURegister(HInstruction* instr) {
  return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
                                    : static_cast<CPURegister>(OutputRegister(instr));
}

CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return IsFPType(instr->InputAt(index)->GetType())
      ? static_cast<CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<CPURegister>(InputRegisterAt(instr, index));
}

int64_t Int64ConstantFrom(Location location) {
  HConstant* instr = location.GetConstant();
  return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
                                : instr->AsLongConstant()->GetValue();
}

Operand OperandFrom(Location location, Primitive::Type type) {
  if (location.IsRegister()) {
    return Operand(RegisterFrom(location, type));
  } else {
    return Operand(Int64ConstantFrom(location));
  }
}

Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

MemOperand StackOperandFrom(Location location) {
  return MemOperand(sp, location.GetStackIndex());
}
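// For illustration: heap references are 32-bit on ARM64 (see kHeapRefSize
// above), so the helpers below take the W view of the base register but
// address memory through its X view, e.g.
//   HeapOperand(obj, mirror::Object::ClassOffset())
// with `obj` a W register holding a reference.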

MemOperand HeapOperand(const Register& base, size_t offset = 0) {
  // A heap reference must be 32bit, so it fits in a W register.
  DCHECK(base.IsW());
  return MemOperand(base.X(), offset);
}

MemOperand HeapOperand(const Register& base, Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}

Location LocationFrom(const Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
}

Location LocationFrom(const FPRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.code());
}

}  // namespace

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const FPRegister kRuntimeParameterFpuRegisters[] = { };
static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;

class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength) {}

  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

class SlowPathCodeARM64 : public SlowPathCode {
 public:
  SlowPathCodeARM64() : entry_label_(), exit_label_() {}

  vixl::Label* GetEntryLabel() { return &entry_label_; }
  vixl::Label* GetExitLabel() { return &exit_label_; }

 private:
  vixl::Label entry_label_;
  vixl::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};
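// For illustration: the fast path branches to GetEntryLabel() when the
// exceptional case is detected; slow paths that can resume bind
// GetExitLabel() back in the main instruction stream and finish with
// `__ B(GetExitLabel())`, while throwing paths (bounds check, null check,
// div-by-zero) never return.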

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class StackOverflowCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  StackOverflowCheckSlowPathARM64() {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowStackOverflow), nullptr, 0);
    CheckEntrypointTypes<kQuickThrowStackOverflow, void, void>();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    codegen->RestoreLiveRegisters(instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
  } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                      : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Is64BitType(type) ? 2 : 1;
  return next_location;
}
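// For illustration (assuming the managed ARM64 convention declared in
// code_generator_arm64.h: core arguments in x1..x7, FP arguments in d0..d7):
// a signature (int, double, long) would be assigned w1, d0 and x2, while
// stack_index_ still advances by 1, 2 and 2 slots respectively, so every
// argument keeps its reserved stack space.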

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this) {}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::RestoreScratch(int reg) {
  __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void ParallelMoveResolverARM64::SpillScratch(int reg) {
  __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(GetVIXLAssembler());
    Register temp = temps.AcquireX();
    if (kExplicitStackOverflowCheck) {
      SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM64();
      AddSlowPath(slow_path);

      __ Ldr(temp, MemOperand(tr, Thread::StackEndOffset<kArm64WordSize>().Int32Value()));
      __ Cmp(sp, temp);
      __ B(lo, slow_path->GetEntryLabel());
    } else {
      // Implicit check: probe the lowest address the method may touch; a
      // stack overflow surfaces as a fault on this load.
      __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
      __ Ldr(wzr, MemOperand(temp, 0));
      RecordPcInfo(nullptr, 0);
    }
  }

  CPURegList preserved_regs = GetFramePreservedRegisters();
  int frame_size = GetFrameSize();
  core_spill_mask_ |= preserved_regs.list();

  __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
  __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());

  // Stack layout:
  //   sp[frame_size - 8]        : lr.
  //   ...                       : other preserved registers.
  //   sp[frame_size - regs_size]: first preserved register.
  //   ...                       : reserved frame space.
  //   sp[0]                     : current method.
}

void CodeGeneratorARM64::GenerateFrameExit() {
  int frame_size = GetFrameSize();
  CPURegList preserved_regs = GetFramePreservedRegisters();
  __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
  __ Drop(frame_size);
}
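// For illustration (hypothetical numbers): with frame_size == 64 and lr/x20
// as the only preserved registers (regs_size == 16), the entry sequence
// stores the current method at sp[0], x20 at sp[48] and lr at sp[56],
// matching the layout comment above; GenerateFrameExit() reloads them and
// drops the 64 bytes.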

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
    int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
                                                 : instruction->AsLongConstant()->GetValue();
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }

  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
  return GetFramePreservedRegistersSize();
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();  // Index within the CardTable - 32bit.
  vixl::Label done;
  __ Cbz(value, &done);
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  __ Bind(&done);
}
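// For illustration: the card table base loaded from the thread is biased so
// that `card + (object >> kCardShift)` addresses the card covering `object`,
// and the low byte of the base itself doubles as the "dirty" value stored by
// the Strb above. The initial Cbz skips the marking entirely when the stored
// reference is null.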

void CodeGeneratorARM64::SetupBlockedRegisters() const {
  // Block reserved registers:
  //   ip0 (VIXL temporary)
  //   ip1 (VIXL temporary)
  //   tr
  //   lr
  //   sp is not part of the allocatable registers, so we don't need to block it.
  // TODO: Avoid blocking callee-saved registers, and instead preserve them
  // where necessary.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  reserved_core_registers.Combine(quick_callee_saved_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }
  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (IsFPType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant() || constant->IsLongConstant()) {
    __ Mov(Register(destination),
           constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
                                     : constant->AsLongConstant()->GetValue());
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}

static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant() || src_cst->IsFloatConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we chose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && IsFPType(type)) ||
           (destination.IsRegister() && !IsFPType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }

  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Is64BitType(type)) &&
             (source.IsFpuRegister() == IsFPType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    Register r1 = XRegisterFrom(loc1);
    Register r2 = XRegisterFrom(loc2);
    Register tmp = temps.AcquireSameSizeAs(r1);
    __ Mov(tmp, r2);
    __ Mov(r2, r1);
    __ Mov(r1, tmp);
  } else if (is_fp_reg2 && is_fp_reg1) {
    FPRegister r1 = DRegisterFrom(loc1);
    FPRegister r2 = DRegisterFrom(loc2);
    FPRegister tmp = temps.AcquireSameSizeAs(r1);
    __ Fmov(tmp, r2);
    __ Fmov(r2, r1);
    __ Fmov(r1, tmp);
  } else if (is_slot1 != is_slot2) {
    MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
    Location reg_loc = is_slot1 ? loc2 : loc1;
    CPURegister reg, tmp;
    if (reg_loc.IsFpuRegister()) {
      reg = DRegisterFrom(reg_loc);
      tmp = temps.AcquireD();
    } else {
      reg = XRegisterFrom(reg_loc);
      tmp = temps.AcquireX();
    }
    __ Ldr(tmp, mem);
    __ Str(reg, mem);
    if (reg_loc.IsFpuRegister()) {
      __ Fmov(FPRegister(reg), FPRegister(tmp));
    } else {
      __ Mov(Register(reg), Register(tmp));
    }
  } else if (is_slot1 && is_slot2) {
    MemOperand mem1 = StackOperandFrom(loc1);
    MemOperand mem2 = StackOperandFrom(loc2);
    Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
    Register tmp2 = temps.AcquireSameSizeAs(tmp1);
    __ Ldr(tmp1, mem1);
    __ Ldr(tmp2, mem2);
    __ Str(tmp1, mem2);
    __ Str(tmp2, mem1);
  } else {
    LOG(FATAL) << "Unimplemented";
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              vixl::CPURegister dst,
                              const vixl::MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK(dst.Is64Bits() == Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               vixl::CPURegister rt,
                               const vixl::MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(rt), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(rt), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK(rt.Is64Bits() == Is64BitType(type));
      __ Str(rt, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc) {
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  if (instruction != nullptr) {
    RecordPcInfo(instruction, dex_pc);
    DCHECK(instruction->IsSuspendCheck()
           || instruction->IsBoundsCheck()
           || instruction->IsNullCheck()
           || instruction->IsDivZeroCheck()
           || !IsLeafMethod());
  }
}
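// For illustration: a runtime call is always `Ldr lr, [tr, #offset]` followed
// by `Blr lr`, i.e. an indirect call through the entrypoint table hanging off
// the thread register, with RecordPcInfo() mapping the return address back to
// a dex PC for stack walks.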

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  __ Ldr(temp, HeapOperand(class_reg, mirror::Class::StatusOffset()));
  __ Cmp(temp, mirror::Class::kStatusInitialized);
  __ B(lt, slow_path->GetEntryLabel());
  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  __ Dmb(InnerShareable, BarrierReads);
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
    UNUSED(instr);                                                                    \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
  }                                                                                   \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
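// For illustration: if an IR were listed in FOR_EACH_UNIMPLEMENTED_INSTRUCTION,
// reaching it at run time would execute `Brk #code` with a code enumerated
// upwards from the 0x900 base, so the break code alone identifies which
// unimplemented instruction was hit.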

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (instr->IsAdd()) {
        __ Add(dst, lhs, rhs);
      } else if (instr->IsAnd()) {
        __ And(dst, lhs, rhs);
      } else if (instr->IsOr()) {
        __ Orr(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Sub(dst, lhs, rhs);
      } else {
        DCHECK(instr->IsXor());
        __ Eor(dst, lhs, rhs);
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FPRegister dst = OutputFPRegister(instr);
      FPRegister lhs = InputFPRegisterAt(instr, 0);
      FPRegister rhs = InputFPRegisterAt(instr, 1);
      if (instr->IsAdd()) {
        __ Fadd(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Fsub(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}
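// For illustration: Java masks shift distances to the width of the operand,
// so an immediate distance is and-ed with kMaxIntShiftValue (31) or
// kMaxLongShiftValue (63) below; e.g. `x << 33` on an int emits
// `Lsl wd, wn, #1`.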

void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  Primitive::Type type = instr->GetType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (rhs.IsImmediate()) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
        if (instr->IsShl()) {
          __ Lsl(dst, lhs, shift_value);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, shift_value);
        } else {
          __ Lsr(dst, lhs, shift_value);
        }
      } else {
        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();

        if (instr->IsShl()) {
          __ Lsl(dst, lhs, rhs_reg);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, rhs_reg);
        } else {
          __ Lsr(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  Register obj = InputRegisterAt(instruction, 0);
  Location index = locations->InAt(1);
  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
  MemOperand source = HeapOperand(obj);
  UseScratchRegisterScope temps(GetVIXLAssembler());

  if (index.IsConstant()) {
    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
    source = HeapOperand(obj, offset);
  } else {
    Register temp = temps.AcquireSameSizeAs(obj);
    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
    source = HeapOperand(temp, offset);
  }

  codegen_->Load(type, OutputCPURegister(instruction), source);
}

void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
  __ Ldr(OutputRegister(instruction),
         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
}
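// For illustration: an element address is always
//   base + DataOffset(component_size) + (index << ComponentSizeShift(type)),
// computed above either as a folded immediate offset (constant index) or via
// an Add with a shifted register operand; VisitArraySet below uses the same
// addressing.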

void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool is_object = value_type == Primitive::kPrimNot;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (is_object) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    locations->SetInAt(2, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  if (value_type == Primitive::kPrimNot) {
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
  } else {
    LocationSummary* locations = instruction->GetLocations();
    Register obj = InputRegisterAt(instruction, 0);
    CPURegister value = InputCPURegisterAt(instruction, 2);
    Location index = locations->InAt(1);
    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
    MemOperand destination = HeapOperand(obj);
    UseScratchRegisterScope temps(GetVIXLAssembler());

    if (index.IsConstant()) {
      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
      destination = HeapOperand(obj, offset);
    } else {
      Register temp = temps.AcquireSameSizeAs(obj);
      Register index_reg = InputRegisterAt(instruction, 1);
      __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
      destination = HeapOperand(temp, offset);
    }

    codegen_->Store(value_type, value, destination);
  }
}

void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  // An unsigned comparison (hs) also catches a negative index reinterpreted
  // as a large positive value.
  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
  __ B(slow_path->GetEntryLabel(), hs);
}

void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));

  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Cbz(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(obj_cls, cls);
  __ B(ne, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}

void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  Primitive::Type in_type = compare->InputAt(0)->GetType();
  switch (in_type) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}
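// For illustration: the long variant below materializes the -1/0/1 result
// without branches: after Cmp, Cset(result, ne) gives 0 or 1, and
// Cneg(result, result, lt) negates it when left < right. The FP variant does
// the same, with the bias condition chosen so that an unordered (NaN)
// comparison yields +1 for gt-bias and -1 for lt-bias.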

void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  //  0 if: left == right
  //  1 if: left  > right
  // -1 if: left  < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      Register result = OutputRegister(compare);
      Register left = InputRegisterAt(compare, 0);
      Operand right = InputOperandAt(compare, 1);

      __ Cmp(left, right);
      __ Cset(result, ne);
      __ Cneg(result, result, lt);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      Register result = OutputRegister(compare);
      FPRegister left = InputFPRegisterAt(compare, 0);
      FPRegister right = InputFPRegisterAt(compare, 1);

      __ Fcmp(left, right);
      if (compare->IsGtBias()) {
        __ Cset(result, ne);
      } else {
        __ Csetm(result, ne);
      }
      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
      break;
    }
    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  Register lhs = InputRegisterAt(instruction, 0);
  Operand rhs = InputOperandAt(instruction, 1);
  Register res = RegisterFrom(locations->Out(), instruction->GetType());
  Condition cond = ARM64Condition(instruction->GetCondition());

  __ Cmp(lhs, rhs);
  __ Cset(res, cond);
}

#define FOR_EACH_CONDITION_INSTRUCTION(M) \
  M(Equal)                                \
  M(NotEqual)                             \
  M(LessThan)                             \
  M(LessThanOrEqual)                      \
  M(GreaterThan)                          \
  M(GreaterThanOrEqual)
#define DEFINE_CONDITION_VISITORS(Name)                                          \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION

void LocationsBuilderARM64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  Primitive::Type type = div->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeARM64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = Int64ConstantFrom(value);
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
  UNUSED(exit);
  if (kIsDebugBuild) {
    down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
    __ Brk(__LINE__);  // TODO: Introduce special markers for such code locations.
  }
}

void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

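// An unconditional branch. On a loop back edge carrying a suspend check, the
// suspend check is generated here instead of a plain branch; branches that
// simply fall through to the next block are elided.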
void LocationsBuilderARM64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
  HInstruction* cond = if_instr->InputAt(0);
  HCondition* condition = cond->AsCondition();
  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
        __ B(true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = if_instr->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
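    // For example `a < b` lowers to `Cmp a, b` followed by `B.lt true_target`;
    // equality tests against a zero immediate are strength-reduced to
    // Cbz/Cbnz below.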
    Register lhs = InputRegisterAt(condition, 0);
    Operand rhs = InputOperandAt(condition, 1);
    Condition arm64_cond = ARM64Condition(condition->GetCondition());
    if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
      if (arm64_cond == eq) {
        __ Cbz(lhs, true_target);
      } else {
        __ Cbnz(lhs, true_target);
      }
    } else {
      __ Cmp(lhs, rhs);
      __ B(arm64_cond, true_target);
    }
  }
  if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
    __ B(false_target);
  }
}

void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
  codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
}

void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  Primitive::Type field_type = instruction->GetFieldType();
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Register obj = InputRegisterAt(instruction, 0);
  codegen_->Store(field_type, value, HeapOperand(obj, instruction->GetFieldOffset()));
  if (field_type == Primitive::kPrimNot) {
    codegen_->MarkGCCard(obj, Register(value));
  }
}

void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), true);  // The output does overlap inputs.
}

void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register out = OutputRegister(instruction);

  vixl::Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Mov(out, 0);
  __ Cbz(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(out, cls);
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ Cset(out, eq);
  } else {
    // If the classes are not equal, we go into a slow path.
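    // The slow path performs the full runtime check and writes its result
    // into `out`.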
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeARM64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
            instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ B(ne, slow_path->GetEntryLabel());
    __ Mov(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}

void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(LocationFrom(x0));

  InvokeDexCallingConventionVisitor calling_convention_visitor;
  for (size_t i = 0; i < invoke->InputCount(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  Primitive::Type return_type = invoke->GetType();
  if (return_type != Primitive::kPrimVoid) {
    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
  }
}

void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  Location receiver = invoke->GetLocations()->InAt(0);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // The register ip1 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
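  // (Without the exclusion, the VIXL macro-assembler could pick ip1 as a
  // scratch register below and clobber the hidden argument.)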
  UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
  scratch_scope.Exclude(ip1);
  __ Mov(ip1, invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, StackOperandFrom(receiver));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  // temp = temp->GetImtEntryAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  HandleInvoke(invoke);
}

void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  // Make sure that ArtMethod* is passed in W0 as per the calling convention.
  DCHECK(temp.Is(w0));
  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
      invoke->GetIndexInDexCache() * kHeapRefSize;

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  // temp = method;
  codegen_->LoadCurrentMethod(temp);
  // temp = temp->dex_cache_resolved_methods_;
  __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
  // temp = temp[index_in_cache];
  __ Ldr(temp, HeapOperand(temp, index_in_cache));
  // lr = temp->entry_point_from_quick_compiled_code_;
  __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArm64WordSize)));
  // lr();
  __ Blr(lr);

  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  DCHECK(!codegen_->IsLeafMethod());
}

void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    DCHECK(receiver.IsRegister());
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  // temp = temp->GetMethodAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

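// Loads a class reference. The referrer's own class is read directly from the
// current ArtMethod; other classes go through the dex cache of resolved types,
// with a slow path for classes that are unresolved or still require
// initialization.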
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
  Register out = OutputRegister(cls);
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  } else {
    DCHECK(cls->CanCallRuntime());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));

    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Cbz(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
  __ Ldr(OutputRegister(instruction), exception);
  __ Str(wzr, exception);
}

void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

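// String literals are loaded through the declaring class's dex cache string
// array; a null entry means the string is not resolved yet and the slow path
// is taken to resolve it.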
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
  codegen_->AddSlowPath(slow_path);

  Register out = OutputRegister(load);
  codegen_->LoadCurrentMethod(out);
  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
  __ Cbz(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

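// Both monitor-enter and monitor-exit lower to a runtime call; the object to
// lock or unlock is passed in the first runtime-calling-convention register.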
void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(
      instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
      instruction,
      instruction->GetDexPc());
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetOut(LocationFrom(x0));
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

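// pAllocArrayWithAccessCheck expects the type index in w0, the array length
// in w1 (the instruction's only input) and the current ArtMethod* in w2; the
// DCHECKs below verify that register allocation honored these constraints.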
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  InvokeRuntimeCallingConvention calling_convention;
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w2));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w1));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
  switch (instruction->InputAt(0)->GetType()) {
    case Primitive::kPrimBoolean:
      __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), Operand(1));
      break;

    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);

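  // A constant input can only be the null (zero) constant, so it always takes
  // the slow path; a register input needs just a single Cbz.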
  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);
  if (obj.IsRegister()) {
    __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
  } else {
    DCHECK(obj.IsConstant()) << obj;
    DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
    __ B(slow_path->GetEntryLabel());
  }
}

void LocationsBuilderARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}

void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderARM64::VisitRem(HRem* rem) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
  switch (rem->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register dividend = InputRegisterAt(rem, 0);
      Register divisor = InputRegisterAt(rem, 1);
      Register output = OutputRegister(rem);
      Register temp = temps.AcquireSameSizeAs(output);

      // ARM64 has no hardware remainder instruction, so compute
      // temp = dividend / divisor, then output = dividend - temp * divisor.
      __ Sdiv(temp, dividend, divisor);
      __ Msub(output, temp, divisor, dividend);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

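// The return value must already sit in the ARM64 return register for its type
// (w0/x0 for core values, s0/d0 for floating point); ARM64ReturnLocation
// encodes that constraint as the input location.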
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type return_type = instruction->InputAt(0)->GetType();
  locations->SetInAt(0, ARM64ReturnLocation(return_type));
}

void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
  instruction->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

void LocationsBuilderARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
  codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
}

void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

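// Stores to a static field. After storing an object reference, the GC card
// for the holding class object must be marked so the garbage collector sees
// the reference write.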
void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Register cls = InputRegisterAt(instruction, 0);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();

  codegen_->Store(field_type, value, HeapOperand(cls, offset));
  if (field_type == Primitive::kPrimNot) {
    codegen_->MarkGCCard(cls, Register(value));
  }
}

void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);
  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  if (IsFPType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (IsFPType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

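// Integral-to-integral conversions reduce to one signed (Sbfx) or unsigned
// (Ubfx) bitfield extract over the narrower of the two sizes; char is the
// only zero-extended primitive type. Scvtf, Fcvtzs and Fcvt handle the
// int-to-FP, FP-to-int and FP-to-FP cases respectively.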
void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
    int result_size = Primitive::ComponentSize(result_type);
    int input_size = Primitive::ComponentSize(input_type);
    int min_size = std::min(result_size, input_size);
    Register output = OutputRegister(conversion);
    Register source = InputRegisterAt(conversion, 0);
    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
    } else if ((result_type == Primitive::kPrimChar) ||
               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    } else {
      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    }
  } else if (IsFPType(result_type) && IsIntegralType(input_type)) {
    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
  } else if (IsIntegralType(result_type) && IsFPType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else if (IsFPType(result_type) && IsFPType(input_type)) {
    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace arm64
}  // namespace art