code_generator_arm64.cc revision 988939683c26c0b1c8808fc206add6337319509a
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

using namespace vixl;  // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

// TODO: Tune the use of Load-Acquire, Store-Release vs Data Memory Barriers.
// For now we prefer the use of load-acquire, store-release over explicit memory barriers.
static constexpr bool kUseAcquireRelease = true;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

namespace {

bool IsFPType(Primitive::Type type) {
  return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
}

bool IsIntegralType(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      return true;
    default:
      return false;
  }
}

bool Is64BitType(Primitive::Type type) {
  return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
}

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::kZeroRegCode;
  }
  return code;
}

int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::kZeroRegCode) {
    return XZR;
  }
  return code;
}

Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister());
  return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
}

Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister());
  return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
}

Register RegisterFrom(Location location, Primitive::Type type) {
  DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
  return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}
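
// Note: ART and VIXL disagree on the codes for the stack pointer and the zero
// register (ART uses 31 for SP and 32 for XZR, per the static_assert above),
// so the two mapping functions translate those codes in both directions and
// pass every other register code through unchanged. For example, a core
// register Location with code 31 maps to VIXL's kSPRegInternalCode, so
// XRegisterFrom() designates the stack pointer rather than the zero register.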

Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

FPRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister());
  return FPRegister::DRegFromCode(location.reg());
}

FPRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister());
  return FPRegister::SRegFromCode(location.reg());
}

FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
  DCHECK(IsFPType(type));
  return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}

FPRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
  return IsFPType(type) ? CPURegister(FPRegisterFrom(location, type))
                        : CPURegister(RegisterFrom(location, type));
}

CPURegister OutputCPURegister(HInstruction* instr) {
  return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
                                    : static_cast<CPURegister>(OutputRegister(instr));
}

CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return IsFPType(instr->InputAt(index)->GetType())
      ? static_cast<CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<CPURegister>(InputRegisterAt(instr, index));
}

int64_t Int64ConstantFrom(Location location) {
  HConstant* instr = location.GetConstant();
  return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
                                : instr->AsLongConstant()->GetValue();
}

Operand OperandFrom(Location location, Primitive::Type type) {
  if (location.IsRegister()) {
    return Operand(RegisterFrom(location, type));
  } else {
    return Operand(Int64ConstantFrom(location));
  }
}

Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

MemOperand StackOperandFrom(Location location) {
  return MemOperand(sp, location.GetStackIndex());
}

MemOperand HeapOperand(const Register& base, size_t offset = 0) {
  // A heap reference must be 32bit, so it must fit in a W register.
  DCHECK(base.IsW());
  return MemOperand(base.X(), offset);
}

MemOperand HeapOperand(const Register& base, Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}

Location LocationFrom(const Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
}

Location LocationFrom(const FPRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.code());
}

}  // namespace

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const FPRegister kRuntimeParameterFpuRegisters[] = { d0, d1, d2, d3, d4, d5, d6, d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength) {}

  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

class SlowPathCodeARM64 : public SlowPathCode {
 public:
  SlowPathCodeARM64() : entry_label_(), exit_label_() {}

  vixl::Label* GetEntryLabel() { return &entry_label_; }
  vixl::Label* GetExitLabel() { return &exit_label_; }

 private:
  vixl::Label entry_label_;
  vixl::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    InvokeRuntimeCallingConvention calling_convention;
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class StackOverflowCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  StackOverflowCheckSlowPathARM64() {}

  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowStackOverflow), nullptr, 0);
    CheckEntrypointTypes<kQuickThrowStackOverflow, void, void*>();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
                                     HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    codegen->RestoreLiveRegisters(instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    codegen->SaveLiveRegisters(locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    codegen->RestoreLiveRegisters(locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
  } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                      : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Is64BitType(type) ? 2 : 1;
  return next_location;
}
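
// A worked example of the assignment above: for a method with signature
// (IJFD)V,
//   int    -> first core argument register   (gp_index_ 0 -> 1), stack_index_ += 1
//   long   -> second core argument register  (gp_index_ 1 -> 2), stack_index_ += 2
//   float  -> first FP argument register     (fp_index_ 0 -> 1), stack_index_ += 1
//   double -> second FP argument register    (fp_index_ 1 -> 2), stack_index_ += 2
// Once a register file runs out, further arguments of that kind fall through
// to the stack slot at GetStackOffsetOf(stack_index_); stack_index_ still
// advances for every argument because stack space is reserved for all of them.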

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    0,
                    0,
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this) {}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::RestoreScratch(int reg) {
  __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void ParallelMoveResolverARM64::SpillScratch(int reg) {
  __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(GetVIXLAssembler());
    Register temp = temps.AcquireX();
    if (GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
      __ Ldr(wzr, MemOperand(temp, 0));
      RecordPcInfo(nullptr, 0);
    } else {
      SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM64();
      AddSlowPath(slow_path);

      __ Ldr(temp, MemOperand(tr, Thread::StackEndOffset<kArm64WordSize>().Int32Value()));
      __ Cmp(sp, temp);
      __ B(lo, slow_path->GetEntryLabel());
    }
  }

  CPURegList preserved_regs = GetFramePreservedRegisters();
  int frame_size = GetFrameSize();
  core_spill_mask_ |= preserved_regs.list();

  __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
  __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());

  // Stack layout:
  //   sp[frame_size - 8]        : lr.
  //   ...                       : other preserved registers.
  //   sp[frame_size - regs_size]: first preserved register.
  //   ...                       : reserved frame space.
  //   sp[0]                     : current method.
}

void CodeGeneratorARM64::GenerateFrameExit() {
  int frame_size = GetFrameSize();
  CPURegList preserved_regs = GetFramePreservedRegisters();
  __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
  __ Drop(frame_size);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
    int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
                                                 : instruction->AsLongConstant()->GetValue();
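    // Constants are materialized directly: Mov into the destination register,
    // or, for a stack destination, Mov into a scratch register followed by a
    // store, rather than being loaded from memory.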
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }

  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
  return GetFramePreservedRegistersSize();
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();  // Index within the CardTable - 32bit.
  vixl::Label done;
  __ Cbz(value, &done);
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  __ Bind(&done);
}

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // Block reserved registers:
  //   ip0 (VIXL temporary)
  //   ip1 (VIXL temporary)
  //   tr
  //   lr
  // sp is not part of the allocatable registers, so we don't need to block it.
  // TODO: Avoid blocking callee-saved registers, and instead preserve them
  // where necessary.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  reserved_core_registers.Combine(quick_callee_saved_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }
  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (IsFPType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant() || constant->IsLongConstant()) {
    __ Mov(Register(destination),
           constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
                                     : constant->AsLongConstant()->GetValue());
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}

static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant() || src_cst->IsFloatConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && IsFPType(type)) ||
           (destination.IsRegister() && !IsFPType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }

  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Is64BitType(type)) &&
             (source.IsFpuRegister() == IsFPType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    Register r1 = XRegisterFrom(loc1);
    Register r2 = XRegisterFrom(loc2);
    Register tmp = temps.AcquireSameSizeAs(r1);
    __ Mov(tmp, r2);
    __ Mov(r2, r1);
    __ Mov(r1, tmp);
  } else if (is_fp_reg2 && is_fp_reg1) {
    FPRegister r1 = DRegisterFrom(loc1);
    FPRegister r2 = DRegisterFrom(loc2);
    FPRegister tmp = temps.AcquireSameSizeAs(r1);
    __ Fmov(tmp, r2);
    __ Fmov(r2, r1);
    __ Fmov(r1, tmp);
  } else if (is_slot1 != is_slot2) {
    MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
    Location reg_loc = is_slot1 ? loc2 : loc1;
    CPURegister reg, tmp;
    if (reg_loc.IsFpuRegister()) {
      reg = DRegisterFrom(reg_loc);
      tmp = temps.AcquireD();
    } else {
      reg = XRegisterFrom(reg_loc);
      tmp = temps.AcquireX();
    }
    __ Ldr(tmp, mem);
    __ Str(reg, mem);
    if (reg_loc.IsFpuRegister()) {
      __ Fmov(FPRegister(reg), FPRegister(tmp));
    } else {
      __ Mov(Register(reg), Register(tmp));
    }
  } else if (is_slot1 && is_slot2) {
    MemOperand mem1 = StackOperandFrom(loc1);
    MemOperand mem2 = StackOperandFrom(loc2);
    Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
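    // Swapping two stack slots takes two scratch registers of the slot width:
    // load both slots, then store each value into the other slot.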
    Register tmp2 = temps.AcquireSameSizeAs(tmp1);
    __ Ldr(tmp1, mem1);
    __ Ldr(tmp2, mem2);
    __ Str(tmp1, mem2);
    __ Str(tmp2, mem1);
  } else {
    LOG(FATAL) << "Unimplemented";
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsRegisterOffset());
  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), src.offset());
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));

      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
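      // There is no load-acquire form for FP registers, so load-acquire into a
      // core scratch register and move the bits across with Fmov.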
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsRegisterOffset());
  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  __ Add(temp_base, dst.base(), dst.offset());
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));

      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc) {
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  if (instruction != nullptr) {
    RecordPcInfo(instruction, dex_pc);
    DCHECK(instruction->IsSuspendCheck()
           || instruction->IsBoundsCheck()
           || instruction->IsNullCheck()
           || instruction->IsDivZeroCheck()
           || !IsLeafMethod());
  }
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  if (kUseAcquireRelease) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
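    // Load-acquire the class status so that, if the class is initialized, all
    // stores made by the initializing thread (static field values, etc.) are
    // visible before the class is used. Any status below kStatusInitialized
    // takes the slow path.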
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
    UNUSED(instr);                                                                    \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
  }                                                                                   \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (instr->IsAdd()) {
        __ Add(dst, lhs, rhs);
      } else if (instr->IsAnd()) {
        __ And(dst, lhs, rhs);
      } else if (instr->IsOr()) {
        __ Orr(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Sub(dst, lhs, rhs);
      } else {
        DCHECK(instr->IsXor());
        __ Eor(dst, lhs, rhs);
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FPRegister dst = OutputFPRegister(instr);
      FPRegister lhs = InputFPRegisterAt(instr, 0);
      FPRegister rhs = InputFPRegisterAt(instr, 1);
      if (instr->IsAdd()) {
        __ Fadd(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Fsub(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
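      // The shift amount may be a constant or a register. The code generator
      // masks immediates to the Java ranges (0-31 for int, 0-63 for long);
      // register shift amounts need no explicit masking because the AArch64
      // variable-shift instructions already use only the low bits of the
      // shift register, which matches Java semantics.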
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  Primitive::Type type = instr->GetType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (rhs.IsImmediate()) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
        if (instr->IsShl()) {
          __ Lsl(dst, lhs, shift_value);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, shift_value);
        } else {
          __ Lsr(dst, lhs, shift_value);
        }
      } else {
        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();

        if (instr->IsShl()) {
          __ Lsl(dst, lhs, rhs_reg);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, rhs_reg);
        } else {
          __ Lsr(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  Register obj = InputRegisterAt(instruction, 0);
  Location index = locations->InAt(1);
  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
  MemOperand source = HeapOperand(obj);
  UseScratchRegisterScope temps(GetVIXLAssembler());

  if (index.IsConstant()) {
    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
    source = HeapOperand(obj, offset);
  } else {
    Register temp = temps.AcquireSameSizeAs(obj);
    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
    source = HeapOperand(temp, offset);
  }

  codegen_->Load(type, OutputCPURegister(instruction), source);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
  __ Ldr(OutputRegister(instruction),
         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool is_object = value_type == Primitive::kPrimNot;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (is_object) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    locations->SetInAt(2, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  if (value_type == Primitive::kPrimNot) {
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
  } else {
    LocationSummary* locations = instruction->GetLocations();
    Register obj = InputRegisterAt(instruction, 0);
    CPURegister value = InputCPURegisterAt(instruction, 2);
    Location index = locations->InAt(1);
    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
    MemOperand destination = HeapOperand(obj);
    UseScratchRegisterScope temps(GetVIXLAssembler());

    if (index.IsConstant()) {
      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
      destination = HeapOperand(obj, offset);
    } else {
      Register temp = temps.AcquireSameSizeAs(obj);
      Register index_reg = InputRegisterAt(instruction, 1);
      __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
      destination = HeapOperand(temp, offset);
    }

    codegen_->Store(value_type, value, destination);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
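  // A single unsigned 'hs' branch catches both failure cases: a negative index
  // wraps to a large unsigned value, and a valid index must be strictly below
  // the array length.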
  __ B(slow_path->GetEntryLabel(), hs);
}

void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));

  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Cbz(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(obj_cls, cls);
  __ B(ne, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}

void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  Primitive::Type in_type = compare->InputAt(0)->GetType();
  switch (in_type) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  //  0 if: left == right
  //  1 if: left  > right
  // -1 if: left  < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      Register result = OutputRegister(compare);
      Register left = InputRegisterAt(compare, 0);
      Operand right = InputOperandAt(compare, 1);

      __ Cmp(left, right);
      __ Cset(result, ne);
      __ Cneg(result, result, lt);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      Register result = OutputRegister(compare);
      FPRegister left = InputFPRegisterAt(compare, 0);
      FPRegister right = InputFPRegisterAt(compare, 1);

      __ Fcmp(left, right);
      if (compare->IsGtBias()) {
        __ Cset(result, ne);
      } else {
        __ Csetm(result, ne);
      }
      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
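      // NaN handling: an unordered Fcmp sets the flags so that `ne` is true
      // but neither `mi` nor `gt` is. With gt bias the intermediate 1 is kept
      // (NaN compares as "greater"); with lt bias the intermediate -1 is kept
      // (NaN compares as "less"). For ordered inputs the Cneg folds the result
      // into the same -1/0/1 convention as the long case above.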

void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  Register lhs = InputRegisterAt(instruction, 0);
  Operand rhs = InputOperandAt(instruction, 1);
  Register res = RegisterFrom(locations->Out(), instruction->GetType());
  Condition cond = ARM64Condition(instruction->GetCondition());

  __ Cmp(lhs, rhs);
  __ Cset(res, cond);
}

#define FOR_EACH_CONDITION_INSTRUCTION(M) \
  M(Equal)                                \
  M(NotEqual)                             \
  M(LessThan)                             \
  M(LessThanOrEqual)                      \
  M(GreaterThan)                          \
  M(GreaterThanOrEqual)
#define DEFINE_CONDITION_VISITORS(Name)                                                  \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION
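
// A materialized condition, e.g. HLessThan, thus becomes (illustrative
// registers; 'lt' comes from ARM64Condition()):
//   cmp   w0, w1
//   cset  w2, lt              // w2 = (w0 < w1) ? 1 : 0
// Non-materialized conditions emit nothing here; VisitIf folds them into the
// branch instead.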

void LocationsBuilderARM64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  Primitive::Type type = div->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeARM64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = Int64ConstantFrom(value);
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
  }
}
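
// When the divisor is not a constant, the whole zero check is a single
// compare-and-branch (illustrative register):
//   cbz   w1, slow_path       // Throw ArithmeticException on a zero divisor.
// A constant divisor is resolved at compile time: zero becomes an
// unconditional branch to the slow path, anything else emits no code at all.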

void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
  UNUSED(exit);
  if (kIsDebugBuild) {
    down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
    __ Brk(__LINE__);  // TODO: Introduce special markers for such code locations.
  }
}

void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
  HInstruction* cond = if_instr->InputAt(0);
  HCondition* condition = cond->AsCondition();
  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
        __ B(true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = if_instr->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    Register lhs = InputRegisterAt(condition, 0);
    Operand rhs = InputOperandAt(condition, 1);
    Condition arm64_cond = ARM64Condition(condition->GetCondition());
    if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
      if (arm64_cond == eq) {
        __ Cbz(lhs, true_target);
      } else {
        __ Cbnz(lhs, true_target);
      }
    } else {
      __ Cmp(lhs, rhs);
      __ B(arm64_cond, true_target);
    }
  }
  if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
    __ B(false_target);
  }
}
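
// For example, 'if (a == 0)' with a non-materialized HEqual folds into a
// single compare-and-branch, while 'if (a < b)' needs the full compare
// (illustrative registers):
//   cbz   w0, true_block      // a == 0 case
// versus:
//   cmp   w0, w1
//   b.lt  true_block          // a < b case
//   b     false_block         // only when the false block is not next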

void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  Register obj = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    } else {
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(obj, offset));
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(obj, Register(value));
  }
}
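
// With kUseAcquireRelease, a volatile int field access pair comes out roughly
// as (illustrative registers; ldar/stlr take no immediate offset, so a
// scratch register holds base + offset):
//   add   x16, x1, #offset
//   ldar  w0, [x16]           // volatile get: load-acquire
//   stlr  w0, [x16]           // volatile set: store-release
// Without it, the same accesses are plain ldr/str bracketed by dmb barriers,
// as the explicit-barrier branches above show.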

void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsClassFinal() ? LocationSummary::kNoCall
                                  : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output overlaps the inputs: it is used as a temporary while they are still live.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register out = OutputRegister(instruction);

  vixl::Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Mov(out, 0);
  __ Cbz(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(out, cls);
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ Cset(out, eq);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeARM64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
            instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ B(ne, slow_path->GetEntryLabel());
    __ Mov(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}
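
// The final-class fast path above is branch-free after the null check
// (illustrative registers):
//   mov   w2, #0
//   cbz   w0, done            // null is not an instance of anything.
//   ldr   w2, [x0, #class_offset]
//   cmp   w2, w1
//   cset  w2, eq              // an exact class match decides the result.
// done: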

void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(LocationFrom(x0));

  InvokeDexCallingConventionVisitor calling_convention_visitor;
  for (size_t i = 0; i < invoke->InputCount(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  Primitive::Type return_type = invoke->GetType();
  if (return_type != Primitive::kPrimVoid) {
    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
  }
}

void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  Location receiver = invoke->GetLocations()->InAt(0);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // The register ip1 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
  UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
  scratch_scope.Exclude(ip1);
  __ Mov(ip1, invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, StackOperandFrom(receiver));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
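
// The interface dispatch above therefore emits roughly (illustrative
// registers; `temp` is w0 per HandleInvoke, ip1 is x17):
//   mov   x17, #dex_method_index   // hidden argument for conflict resolution
//   ldr   w0, [x1, #class_offset]  // receiver's class
//   ldr   w0, [x0, #imt_offset]    // ArtMethod* from the IMT slot
//   ldr   lr, [x0, #entry_point]
//   blr   lr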

void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  HandleInvoke(invoke);
}

void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  // Make sure that ArtMethod* is passed in W0 as per the calling convention.
  DCHECK(temp.Is(w0));
  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
      invoke->GetDexMethodIndex() * kHeapRefSize;

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  // temp = method;
  codegen_->LoadCurrentMethod(temp);
  // temp = temp->dex_cache_resolved_methods_;
  __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
  // temp = temp[index_in_cache];
  __ Ldr(temp, HeapOperand(temp, index_in_cache));
  // lr = temp->entry_point_from_quick_compiled_code_;
  __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArm64WordSize)));
  // lr();
  __ Blr(lr);

  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  DCHECK(!codegen_->IsLeafMethod());
}

void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    DCHECK(receiver.IsRegister());
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
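
// Virtual dispatch thus costs three dependent loads and an indirect call
// (illustrative registers; the vtable slot offset is known statically):
//   ldr   w0, [x1, #class_offset]   // receiver's class
//   ldr   w0, [x0, #vtable_offset]  // ArtMethod* from the embedded vtable
//   ldr   lr, [x0, #entry_point]
//   blr   lr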

void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
                                                              : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
  Register out = OutputRegister(cls);
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  } else {
    DCHECK(cls->CanCallRuntime());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));

    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Cbz(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
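
// Both HLoadClass (above) and HLoadString (below) follow the same dex-cache
// pattern: walk from the current method to the cache array, load the slot,
// and branch to a slow path while it is still null. With `out` shown as w0,
// already holding the current ArtMethod* (registers and offsets illustrative):
//   ldr   w0, [x0, #resolved_types_offset]
//   ldr   w0, [x0, #type_slot_offset]
//   cbz   w0, slow_path       // not resolved yet: call into the runtime.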

void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
  __ Ldr(OutputRegister(instruction), exception);
  __ Str(wzr, exception);
}

void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
  codegen_->AddSlowPath(slow_path);

  Register out = OutputRegister(load);
  codegen_->LoadCurrentMethod(out);
  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
  __ Cbz(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
                                                 : QUICK_ENTRY_POINT(pUnlockObject),
                          instruction,
                          instruction->GetDexPc());
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetOut(LocationFrom(x0));
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  InvokeRuntimeCallingConvention calling_convention;
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w2));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w1));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
  switch (instruction->InputAt(0)->GetType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}
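
// The two null-check strategies compare as follows (illustrative register):
//   ldr   wzr, [x0]           // implicit: a load from null faults, and the
//                             // signal handler maps the pc back to a dex pc.
//   cbz   w0, slow_path       // explicit: branch to a throwing slow path.
// The implicit form is cheap on the fast path since the load into wzr has no
// architecturally visible side effect.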

void LocationsBuilderARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}

void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind = IsFPType(type) ? LocationSummary::kCall
                                                       : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register dividend = InputRegisterAt(rem, 0);
      Register divisor = InputRegisterAt(rem, 1);
      Register output = OutputRegister(rem);
      Register temp = temps.AcquireSameSizeAs(output);

      __ Sdiv(temp, dividend, divisor);
      __ Msub(output, temp, divisor, dividend);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}
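
// ARM64 has no integer remainder instruction, so the Sdiv/Msub pair above
// computes rem = dividend - (dividend / divisor) * divisor, e.g. with
// illustrative registers:
//   sdiv  w3, w0, w1          // w3 = w0 / w1, rounded toward zero
//   msub  w2, w3, w1, w0      // w2 = w0 - w3 * w1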

void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type return_type = instruction->InputAt(0)->GetType();
  locations->SetInAt(0, ARM64ReturnLocation(return_type));
}

void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
  instruction->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

void LocationsBuilderARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
  }
}

void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  Register cls = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();

  if (instruction->IsVolatile()) {
    if (kUseAcquireRelease) {
      codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
    } else {
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(cls, offset));
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(cls, offset));
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(cls, Register(value));
  }
}

void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);
  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  if (IsFPType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (IsFPType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
    int result_size = Primitive::ComponentSize(result_type);
    int input_size = Primitive::ComponentSize(input_type);
    int min_size = std::min(result_size, input_size);
    Register output = OutputRegister(conversion);
    Register source = InputRegisterAt(conversion, 0);
    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
    } else if ((result_type == Primitive::kPrimChar) ||
               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    } else {
      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    }
  } else if (IsFPType(result_type) && IsIntegralType(input_type)) {
    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
  } else if (IsIntegralType(result_type) && IsFPType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else if (IsFPType(result_type) && IsFPType(input_type)) {
    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}
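
// A few representative conversions emitted above (illustrative registers):
//   sbfx  w0, w1, #0, #8      // int -> byte: sign-extend the low 8 bits
//   ubfx  w0, w1, #0, #16     // int -> char: zero-extend the low 16 bits
//   scvtf d0, w1              // int -> double
//   fcvtzs w0, d1             // double -> int, rounding toward zero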

void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace arm64
}  // namespace art