code_generator_arm64.cc revision 77520bca97ec44e3758510cebd0f20e3bb4584ea

/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"


using namespace vixl;  // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif


namespace art {

namespace arm64 {

// TODO: Tune the use of Load-Acquire, Store-Release vs Data Memory Barriers.
// For now we prefer the use of load-acquire, store-release over explicit memory barriers.
static constexpr bool kUseAcquireRelease = true;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

namespace {

bool IsFPType(Primitive::Type type) {
  return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
}

bool IsIntegralType(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      return true;
    default:
      return false;
  }
}

bool Is64BitType(Primitive::Type type) {
  return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
}

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::kZeroRegCode;
  }
  return code;
}

int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::kZeroRegCode) {
    return XZR;
  }
  return code;
}
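
// Illustration of the mapping above: ART and VIXL agree on the codes of the
// general purpose registers, but ART encodes SP as 31 and XZR as 32 (see the
// static_assert above), whereas VIXL reserves code 31 for the zero register
// and uses a separate internal code for SP. The two helpers are exact
// inverses of each other.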

Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister());
  return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
}

Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister());
  return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
}

Register RegisterFrom(Location location, Primitive::Type type) {
  DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
  return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}

Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

FPRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister());
  return FPRegister::DRegFromCode(location.reg());
}

FPRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister());
  return FPRegister::SRegFromCode(location.reg());
}

FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
  DCHECK(IsFPType(type));
  return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}

FPRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
  return IsFPType(type) ? CPURegister(FPRegisterFrom(location, type))
                        : CPURegister(RegisterFrom(location, type));
}

CPURegister OutputCPURegister(HInstruction* instr) {
  return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
                                    : static_cast<CPURegister>(OutputRegister(instr));
}

CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return IsFPType(instr->InputAt(index)->GetType())
      ? static_cast<CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<CPURegister>(InputRegisterAt(instr, index));
}

int64_t Int64ConstantFrom(Location location) {
  HConstant* instr = location.GetConstant();
  return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
                                : instr->AsLongConstant()->GetValue();
}

Operand OperandFrom(Location location, Primitive::Type type) {
  if (location.IsRegister()) {
    return Operand(RegisterFrom(location, type));
  } else {
    return Operand(Int64ConstantFrom(location));
  }
}

Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

MemOperand StackOperandFrom(Location location) {
  return MemOperand(sp, location.GetStackIndex());
}

MemOperand HeapOperand(const Register& base, size_t offset = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return MemOperand(base.X(), offset);
}

MemOperand HeapOperand(const Register& base, Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}

Location LocationFrom(const Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
}

Location LocationFrom(const FPRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.code());
}

}  // namespace

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const FPRegister kRuntimeParameterFpuRegisters[] = { d0, d1, d2, d3, d4, d5, d6, d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength) {}

  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
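
// QUICK_ENTRY_POINT(x) yields the byte offset of the quick entrypoint `x`
// within the Thread object, for a 64-bit word size. InvokeRuntime (below)
// consumes such an offset roughly as:
//   __ Ldr(lr, MemOperand(tr, entry_point_offset));
//   __ Blr(lr);
// i.e. the runtime function pointer is loaded from the current thread (tr)
// and called indirectly.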

class SlowPathCodeARM64 : public SlowPathCode {
 public:
  SlowPathCodeARM64() : entry_label_(), exit_label_() {}

  vixl::Label* GetEntryLabel() { return &entry_label_; }
  vixl::Label* GetExitLabel() { return &exit_label_; }

 private:
  vixl::Label entry_label_;
  vixl::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class StackOverflowCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  StackOverflowCheckSlowPathARM64() {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowStackOverflow), nullptr, 0);
    CheckEntrypointTypes<kQuickThrowStackOverflow, void, void*>();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    codegen->RestoreLiveRegisters(instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
  } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                      : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Is64BitType(type) ? 2 : 1;
  return next_location;
}
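
// A sketch of GetNextLocation in action, assuming the managed ARM64 calling
// convention with separate core and FP register pools: for an argument list
// (int, float, long, double), successive calls would typically return a W
// core register, an S FP register, an X core register and a D FP register,
// while stack_index_ advances by 1 + 1 + 2 + 2 slots, since stack space is
// reserved even for arguments that were passed in registers.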

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this) {}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::RestoreScratch(int reg) {
  __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void ParallelMoveResolverARM64::SpillScratch(int reg) {
  __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(GetVIXLAssembler());
    Register temp = temps.AcquireX();
    if (GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
      __ Ldr(wzr, MemOperand(temp, 0));
      RecordPcInfo(nullptr, 0);
    } else {
      SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM64();
      AddSlowPath(slow_path);

      __ Ldr(temp, MemOperand(tr, Thread::StackEndOffset<kArm64WordSize>().Int32Value()));
      __ Cmp(sp, temp);
      __ B(lo, slow_path->GetEntryLabel());
    }
  }

  CPURegList preserved_regs = GetFramePreservedRegisters();
  int frame_size = GetFrameSize();
  core_spill_mask_ |= preserved_regs.list();

  __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
  __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());

  // Stack layout:
  // sp[frame_size - 8]        : lr.
  // ...                       : other preserved registers.
  // sp[frame_size - regs_size]: first preserved register.
  // ...                       : reserved frame space.
  // sp[0]                     : current method.
}
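
// Note that the single pre-indexed store above,
//   __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
// both allocates the whole frame (sp -= frame_size) and stores the current
// method (passed in w0) at sp[0] in one instruction; the preserved registers
// are then poked into the top of the freshly allocated frame.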

void CodeGeneratorARM64::GenerateFrameExit() {
  int frame_size = GetFrameSize();
  CPURegList preserved_regs = GetFramePreservedRegisters();
  __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
  __ Drop(frame_size);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
    int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
                                                 : instruction->AsLongConstant()->GetValue();
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
  return GetFramePreservedRegistersSize();
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();  // Index within the CardTable - 32bit.
  vixl::Label done;
  __ Cbz(value, &done);
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  __ Bind(&done);
}
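
// The card marking above computes the address of the card covering `object`
// as card_table_base + (object >> kCardShift), with the base read from the
// current thread. Storing the low byte of the base register itself (Strb)
// relies on the base being arranged so that its least significant byte
// equals the dirty-card value; the GC only needs a non-zero byte to treat
// the card as dirty. The initial Cbz skips the marking entirely when the
// stored value is null, as a null store never creates a reference to track.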

void CodeGeneratorARM64::SetupBlockedRegisters() const {
  // Block reserved registers:
  //   ip0 (VIXL temporary)
  //   ip1 (VIXL temporary)
  //   tr
  //   lr
  // sp is not part of the allocatable registers, so we don't need to block it.
  // TODO: Avoid blocking callee-saved registers, and instead preserve them
  // where necessary.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  reserved_core_registers.Combine(quick_callee_saved_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }
  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (IsFPType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant() || constant->IsLongConstant()) {
    __ Mov(Register(destination),
           constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
                                     : constant->AsLongConstant()->GetValue());
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}


static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}
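
// MoveLocation below infers a type when the caller passes kPrimVoid. A rough
// sketch of the inference: a 32-bit source (stack slot, int or float
// constant) becomes kPrimInt or kPrimFloat depending on the destination
// kind; anything else (double stack slot, 64-bit constant, or a register
// source of unknown width) defaults to kPrimLong or kPrimDouble, so that
// register-to-register moves conservatively copy all 64 bits.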

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant() || src_cst->IsFloatConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && IsFPType(type)) ||
           (destination.IsRegister() && !IsFPType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Is64BitType(type)) &&
             (source.IsFpuRegister() == IsFPType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    Register r1 = XRegisterFrom(loc1);
    Register r2 = XRegisterFrom(loc2);
    Register tmp = temps.AcquireSameSizeAs(r1);
    __ Mov(tmp, r2);
    __ Mov(r2, r1);
    __ Mov(r1, tmp);
  } else if (is_fp_reg2 && is_fp_reg1) {
    FPRegister r1 = DRegisterFrom(loc1);
    FPRegister r2 = DRegisterFrom(loc2);
    FPRegister tmp = temps.AcquireSameSizeAs(r1);
    __ Fmov(tmp, r2);
    __ Fmov(r2, r1);
    __ Fmov(r1, tmp);
  } else if (is_slot1 != is_slot2) {
    MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
    Location reg_loc = is_slot1 ? loc2 : loc1;
    CPURegister reg, tmp;
    if (reg_loc.IsFpuRegister()) {
      reg = DRegisterFrom(reg_loc);
      tmp = temps.AcquireD();
    } else {
      reg = XRegisterFrom(reg_loc);
      tmp = temps.AcquireX();
    }
    __ Ldr(tmp, mem);
    __ Str(reg, mem);
    if (reg_loc.IsFpuRegister()) {
      __ Fmov(FPRegister(reg), FPRegister(tmp));
    } else {
      __ Mov(Register(reg), Register(tmp));
    }
  } else if (is_slot1 && is_slot2) {
    MemOperand mem1 = StackOperandFrom(loc1);
    MemOperand mem2 = StackOperandFrom(loc2);
    Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
    Register tmp2 = temps.AcquireSameSizeAs(tmp1);
    __ Ldr(tmp1, mem1);
    __ Ldr(tmp2, mem2);
    __ Str(tmp1, mem2);
    __ Str(tmp2, mem1);
  } else {
    LOG(FATAL) << "Unimplemented";
  }
}
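
// Load below picks the memory access matching Java semantics for sub-word
// types: boolean and char are zero-extended (Ldrb/Ldrh) while byte and short
// are sign-extended (Ldrsb/Ldrsh); int and wider types use a plain Ldr of
// the register width checked by the DCHECK.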

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}
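
// LoadAcquire below mirrors Load, but with acquire semantics. Two quirks:
// Ldarb/Ldarh can only zero-extend, so byte and short loads need an extra
// Sbfx to restore the sign, and Ldar has no FP form, so float and double
// values are loaded through a core scratch register and Fmov'd across.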

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsRegisterOffset());
  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), src.offset());
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));

      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsRegisterOffset());
  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  __ Add(temp_base, dst.base(), dst.offset());
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));

      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}
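
// LoadAcquire and StoreRelease are the pair selected by kUseAcquireRelease
// at the top of this file: Ldar* keeps later accesses from being reordered
// before the load, and Stlr* keeps earlier accesses from being reordered
// after the store, which should provide the ordering required for Java
// volatile accesses without explicit Dmb barriers.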

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc) {
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  if (instruction != nullptr) {
    RecordPcInfo(instruction, dex_pc);
    DCHECK(instruction->IsSuspendCheck()
           || instruction->IsBoundsCheck()
           || instruction->IsNullCheck()
           || instruction->IsDivZeroCheck()
           || !IsLeafMethod());
  }
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  if (kUseAcquireRelease) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}
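
// A sketch of the suspend check above: the 16-bit thread flags are loaded
// from the current thread (tr); zero means no pending request, so execution
// falls through or branches directly to the successor block, while any
// non-zero value routes through the slow path, which calls pTestSuspend and
// resumes at the return label or at the successor.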

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
    UNUSED(instr);                                                                    \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
  }                                                                                   \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (instr->IsAdd()) {
        __ Add(dst, lhs, rhs);
      } else if (instr->IsAnd()) {
        __ And(dst, lhs, rhs);
      } else if (instr->IsOr()) {
        __ Orr(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Sub(dst, lhs, rhs);
      } else {
        DCHECK(instr->IsXor());
        __ Eor(dst, lhs, rhs);
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FPRegister dst = OutputFPRegister(instr);
      FPRegister lhs = InputFPRegisterAt(instr, 0);
      FPRegister rhs = InputFPRegisterAt(instr, 1);
      if (instr->IsAdd()) {
        __ Fadd(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Fsub(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}
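
// Note that for the integral cases above, the second input was allocated as
// Location::RegisterOrConstant and InputOperandAt turns a constant into an
// immediate Operand, so e.g. an HAdd with a constant right-hand side can be
// emitted as a single `add dst, lhs, #imm` when the value fits the
// instruction's immediate field; otherwise the macro-assembler is expected
// to materialize it into a scratch register.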

void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  Primitive::Type type = instr->GetType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (rhs.IsImmediate()) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
        if (instr->IsShl()) {
          __ Lsl(dst, lhs, shift_value);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, shift_value);
        } else {
          __ Lsr(dst, lhs, shift_value);
        }
      } else {
        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();

        if (instr->IsShl()) {
          __ Lsl(dst, lhs, rhs_reg);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, rhs_reg);
        } else {
          __ Lsr(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  Register obj = InputRegisterAt(instruction, 0);
  Location index = locations->InAt(1);
  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
  MemOperand source = HeapOperand(obj);
  UseScratchRegisterScope temps(GetVIXLAssembler());

  if (index.IsConstant()) {
    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
    source = HeapOperand(obj, offset);
  } else {
    Register temp = temps.AcquireSameSizeAs(obj);
    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
    source = HeapOperand(temp, offset);
  }

  codegen_->Load(type, OutputCPURegister(instruction), source);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
  __ Ldr(OutputRegister(instruction),
         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool is_object = value_type == Primitive::kPrimNot;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (is_object) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    locations->SetInAt(2, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  if (value_type == Primitive::kPrimNot) {
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
  } else {
    LocationSummary* locations = instruction->GetLocations();
    Register obj = InputRegisterAt(instruction, 0);
    CPURegister value = InputCPURegisterAt(instruction, 2);
    Location index = locations->InAt(1);
    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
    MemOperand destination = HeapOperand(obj);
    UseScratchRegisterScope temps(GetVIXLAssembler());

    if (index.IsConstant()) {
      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
      destination = HeapOperand(obj, offset);
    } else {
      Register temp = temps.AcquireSameSizeAs(obj);
      Register index_reg = InputRegisterAt(instruction, 1);
      __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
      destination = HeapOperand(temp, offset);
    }

    codegen_->Store(value_type, value, destination);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
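
// Note on the array stores above: reference stores are routed through the
// pAputObject runtime entrypoint, which is expected to perform the store
// check against the array's component type and dirty the GC card, while
// primitive stores are emitted inline as a plain Store at
// obj + data_offset + (index << component_size_shift).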

void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
  __ B(slow_path->GetEntryLabel(), hs);
}
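
// The bounds check above branches on `hs` (unsigned higher-or-same), so a
// single comparison covers both failure modes: an index that is too large,
// and a negative index, whose unsigned representation always compares above
// the array length.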

void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));

  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Cbz(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(obj_cls, cls);
  __ B(ne, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}

void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  Primitive::Type in_type = compare->InputAt(0)->GetType();
  switch (in_type) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  //  0 if: left == right
  //  1 if: left  > right
  // -1 if: left  < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      Register result = OutputRegister(compare);
      Register left = InputRegisterAt(compare, 0);
      Operand right = InputOperandAt(compare, 1);

      __ Cmp(left, right);
      __ Cset(result, ne);
      __ Cneg(result, result, lt);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      Register result = OutputRegister(compare);
      FPRegister left = InputFPRegisterAt(compare, 0);
      FPRegister right = InputFPRegisterAt(compare, 1);

      __ Fcmp(left, right);
      if (compare->IsGtBias()) {
        __ Cset(result, ne);
      } else {
        __ Csetm(result, ne);
      }
      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
      break;
    }
    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}
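
// An illustration of the compare idiom above, for the long case:
//   __ Cmp(left, right);          // Set the flags.
//   __ Cset(result, ne);          // result = (left != right) ? 1 : 0.
//   __ Cneg(result, result, lt);  // Negate to -1 when left < right.
// yields the required -1/0/1. The float/double variant pairs Cset with mi or
// Csetm with gt so that an unordered comparison (a NaN operand) collapses to
// +1 for gt-bias compares and to -1 otherwise, matching the dex cmpg/cmpl
// semantics.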
void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  Register lhs = InputRegisterAt(instruction, 0);
  Operand rhs = InputOperandAt(instruction, 1);
  Register res = RegisterFrom(locations->Out(), instruction->GetType());
  Condition cond = ARM64Condition(instruction->GetCondition());

  __ Cmp(lhs, rhs);
  __ Cset(res, cond);
}
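// A materialized condition costs two instructions. For `a < b` with w0/w1 as
// inputs and w2 holding the boolean result (registers illustrative):
//   cmp  w0, w1
//   cset w2, lt          // w2 = (a < b) ? 1 : 0
// When the only consumer is an HIf, materialization is skipped and VisitIf
// folds the compare into the branch instead.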
#define FOR_EACH_CONDITION_INSTRUCTION(M) \
  M(Equal)                                \
  M(NotEqual)                             \
  M(LessThan)                             \
  M(LessThanOrEqual)                      \
  M(GreaterThan)                          \
  M(GreaterThanOrEqual)
#define DEFINE_CONDITION_VISITORS(Name)                                          \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION

void LocationsBuilderARM64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  Primitive::Type type = div->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeARM64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = Int64ConstantFrom(value);
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
  }
}
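// The check is therefore free for a provably non-zero constant divisor, an
// unconditional branch for a constant zero, and otherwise a single
// `cbz w0, <slow path>` (register illustrative) on the register divisor.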
void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
  UNUSED(exit);
  if (kIsDebugBuild) {
    down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
    __ Brk(__LINE__);  // TODO: Introduce special markers for such code locations.
  }
}

void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
  HInstruction* cond = if_instr->InputAt(0);
  HCondition* condition = cond->AsCondition();
  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
        __ B(true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = if_instr->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    Register lhs = InputRegisterAt(condition, 0);
    Operand rhs = InputOperandAt(condition, 1);
    Condition arm64_cond = ARM64Condition(condition->GetCondition());
    if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
      if (arm64_cond == eq) {
        __ Cbz(lhs, true_target);
      } else {
        __ Cbnz(lhs, true_target);
      }
    } else {
      __ Cmp(lhs, rhs);
      __ B(arm64_cond, true_target);
    }
  }
  if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
    __ B(false_target);
  }
}
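// Example of the compare-and-branch folding above: for a non-materialized
// `if (x == 0)` the generic two-instruction form
//   cmp  w0, #0
//   b.eq <true_target>
// collapses into the single
//   cbz  w0, <true_target>
// (register illustrative). Only eq/ne against an immediate zero qualify,
// since cbz/cbnz cannot test any other condition or operand.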
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
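// Sketch of the two volatile-load strategies for a 32-bit field (offset and
// registers illustrative):
//   kUseAcquireRelease:  ldar w0, [x1]        // ordering built into the load
//   barrier variant:     ldr  w0, [x1, #12]
//                        dmb  ish             // kAnyAny full barrier
// Note that ldar only accepts a plain base register, so the acquire path has
// to address the field through a register holding the exact field address.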
void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  Register obj = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    } else {
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(obj, offset));
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(obj, Register(value));
  }
}
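// The store-side counterpart (value in w0, object in x1, illustrative):
//   kUseAcquireRelease:  stlr w0, [x1]        // release store
//   barrier variant:     dmb  ish             // kAnyStore before the store
//                        str  w0, [x1, #12]
//                        dmb  ish             // kAnyAny after the store
// Paired with ldar on the load side, this is what gives volatile fields
// their sequentially consistent behaviour.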
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), true);  // The output does overlap inputs.
}

void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register out = OutputRegister(instruction);

  vixl::Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Mov(out, 0);
  __ Cbz(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(out, cls);
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ Cset(out, eq);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeARM64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
            instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ B(ne, slow_path->GetEntryLabel());
    __ Mov(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}
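// For example, `obj instanceof String` can stay entirely on the fast path
// when the compiler has resolved the class and knows it is final (as
// java.lang.String is): class equality fully decides the test, hence the
// kNoCall summary. For a non-final class the inline code only answers the
// exact-match case and defers the subtype walk to the slow path.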
void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(LocationFrom(x0));

  InvokeDexCallingConventionVisitor calling_convention_visitor;
  for (size_t i = 0; i < invoke->InputCount(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  Primitive::Type return_type = invoke->GetType();
  if (return_type != Primitive::kPrimVoid) {
    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
  }
}

void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  Location receiver = invoke->GetLocations()->InAt(0);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // The register ip1 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
  UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
  scratch_scope.Exclude(ip1);
  __ Mov(ip1, invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, StackOperandFrom(receiver));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
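// Illustrative slot arithmetic for the IMT lookup above: for an interface
// method with ImtIndex 67 and a table of kImtSize entries (assumed 64 for
// this example), the dispatch reads embedded IMT slot 67 % 64 == 3. Several
// methods can hash to the same slot, which is why the dex method index
// travels in ip1 for the conflict trampoline to disambiguate, and why the
// TODO above notes that a missing ICCE check can slip through.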
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  HandleInvoke(invoke);
}

void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  // Make sure that ArtMethod* is passed in W0 as per the calling convention.
  DCHECK(temp.Is(w0));
  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
      invoke->GetDexMethodIndex() * kHeapRefSize;

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  // temp = method;
  codegen_->LoadCurrentMethod(temp);
  // temp = temp->dex_cache_resolved_methods_;
  __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
  // temp = temp[index_in_cache];
  __ Ldr(temp, HeapOperand(temp, index_in_cache));
  // lr = temp->entry_point_from_quick_compiled_code_;
  __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArm64WordSize)));
  // lr();
  __ Blr(lr);

  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  DCHECK(!codegen_->IsLeafMethod());
}
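// Illustrative arithmetic for index_in_cache: with 32-bit heap references
// (kHeapRefSize == 4) and dex method index 5, the callee comes from
// DataOffset(4) + 5 * 4, i.e. element 5 of the resolved-methods array. The
// dispatch is then a short chain of dependent loads ending in blr; there is
// no inline caching or direct branch patching at this stage.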
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    DCHECK(receiver.IsRegister());
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
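// The virtual dispatch above, sketched with illustrative registers and
// offsets for a receiver already in a register:
//   ldr w0, [x1, #0]        // w0 = receiver->klass_
//   ldr w0, [x0, #vtable]   // w0 = klass->embedded_vtable_[index]
//   ldr lr, [x0, #entry]    // lr = method->entry_point_from_quick_compiled_code_
//   blr lr
// The class load doubles as the implicit null check recorded just after it.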
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
                                                              : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
  Register out = OutputRegister(cls);
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  } else {
    DCHECK(cls->CanCallRuntime());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));

    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Cbz(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
  __ Ldr(OutputRegister(instruction), exception);
  __ Str(wzr, exception);
}

void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
  codegen_->AddSlowPath(slow_path);

  Register out = OutputRegister(load);
  codegen_->LoadCurrentMethod(out);
  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
  __ Cbz(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}
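// Both loads above follow the same pattern: walk a chain of dependent heap
// loads from the current method to a dex-cache entry, e.g. for a string
//   method->declaring_class_->dex_cache_strings_[string_index]
// and fall into the slow path only when the cache entry is still null. A
// warm cache therefore pays three dependent loads plus a never-taken cbz.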
void LocationsBuilderARM64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(
      instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
      instruction,
      instruction->GetDexPc());
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetOut(LocationFrom(x0));
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  InvokeRuntimeCallingConvention calling_convention;
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w2));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w1));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
  switch (instruction->InputAt(0)->GetType()) {
    case Primitive::kPrimBoolean:
      __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), Operand(1));
      break;

    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}
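// Two different "not"s fall out of the switch above (registers illustrative):
//   boolean:   eor w0, w1, #1      // flips a 0/1 value: !1 == 0, !0 == 1
//   int/long:  mvn w0, w1          // bitwise complement: ~0x0F == 0xFFFFFFF0
// Boolean not cannot use mvn, since ~1 == 0xFFFFFFFE rather than 0.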
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}
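// The implicit variant is a probing load that discards its result,
//   ldr wzr, [x0, #0]      // faults iff obj is null (register illustrative)
// and relies on the runtime's segfault handler plus the recorded pc info to
// turn the fault back into a NullPointerException. The explicit variant
// spends a real instruction, a cbz into the slow path, but needs no signal
// handling support.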
void LocationsBuilderARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}

void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind = IsFPType(type) ? LocationSummary::kCall
                                                       : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register dividend = InputRegisterAt(rem, 0);
      Register divisor = InputRegisterAt(rem, 1);
      Register output = OutputRegister(rem);
      Register temp = temps.AcquireSameSizeAs(output);

      __ Sdiv(temp, dividend, divisor);
      __ Msub(output, temp, divisor, dividend);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}
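// A64 has no integer remainder instruction, so the code above uses the
// identity a % b == a - (a / b) * b:
//   sdiv temp, dividend, divisor        // temp = a / b (rounds toward zero)
//   msub out, temp, divisor, dividend   // out  = a - temp * b
// e.g. dividend = 7, divisor = 3: temp = 2, out = 7 - 2 * 3 = 1.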
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type return_type = instruction->InputAt(0)->GetType();
  locations->SetInAt(0, ARM64ReturnLocation(return_type));
}

void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
  instruction->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

void LocationsBuilderARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
  }
}

void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  Register cls = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
    } else {
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(cls, offset));
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(cls, offset));
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(cls, Register(value));
  }
}

void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);
  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  if (IsFPType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (IsFPType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
    int result_size = Primitive::ComponentSize(result_type);
    int input_size = Primitive::ComponentSize(input_type);
    int min_size = std::min(result_size, input_size);
    Register output = OutputRegister(conversion);
    Register source = InputRegisterAt(conversion, 0);
    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
    } else if ((result_type == Primitive::kPrimChar) ||
               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    } else {
      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    }
  } else if (IsFPType(result_type) && IsIntegralType(input_type)) {
    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
  } else if (IsIntegralType(result_type) && IsFPType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else if (IsFPType(result_type) && IsFPType(input_type)) {
    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}
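// Integral conversion examples for the branches above (registers
// illustrative):
//   int -> char:  ubfx w0, w1, #0, #16   // zero-extend: char is unsigned 16-bit
//   int -> byte:  sbfx w0, w1, #0, #8    // sign-extend: byte is signed 8-bit
//   int -> long:  sbfx x0, x1, #0, #32   // sign-extend the low 32 bits
// matching Java's rule that char widens unsigned while byte/short/int widen
// with sign.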
void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace arm64
}  // namespace art