// code_generator_arm64.cc, revision 46e2a3915aa68c77426b71e95b9f3658250646b7
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"


using namespace vixl;  // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;

static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const FPRegister kRuntimeParameterFpuRegisters[] = { d0, d1, d2, d3, d4, d5, d6, d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength) {}

  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
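    // (For instance the index may already live in the register the calling
    // convention assigns to the length argument, and vice versa; the resolver
    // orders the moves, using a swap if needed, so neither value is clobbered.)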
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check.)
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
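  // (stack_index_ counts 32-bit stack slots, so a 64-bit argument advances it
  // by two. Slots are reserved even for arguments passed in registers, which
  // keeps GetStackOffsetOf() consistent for the arguments that follow.)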
  return next_location;
}

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                                       const Arm64InstructionSetFeatures& isa_features,
                                       const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    callee_saved_core_registers.list(),
                    callee_saved_fp_registers.list(),
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(LocationFrom(lr));
}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource());
}

void ParallelMoveResolverARM64::RestoreScratch(int reg) {
  __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void ParallelMoveResolverARM64::SpillScratch(int reg) {
  __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(GetVIXLAssembler());
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    // Stack layout:
    //      sp[frame_size - 8]  : lr.
    //      ...                 : other preserved core registers.
    //      ...                 : other preserved fp registers.
    //      ...                 : reserved frame space.
    //      sp[0]               : current method.
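    // The pre-indexed store below bumps sp down by the whole frame size and
    // stores the current method at sp[0] in a single instruction; the callee
    // saves are then written into the top of the new frame.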
    __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
    __ PokeCPURegList(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
    __ PokeCPURegList(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
  }
}

void CodeGeneratorARM64::GenerateFrameExit() {
  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    __ PeekCPURegList(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
    __ PeekCPURegList(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
    __ Drop(frame_size);
  }
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsIntConstant()
      || instruction->IsLongConstant()
      || instruction->IsNullConstant()) {
    int64_t value = GetInt64ValueOf(instruction->AsConstant());
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
          ? temps.AcquireW()
          : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();  // Index within the CardTable - 32bit.
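  // Dirty the card covering `object` only when `value` is non-null: the card
  // table base is read from the thread, the object address is shifted down by
  // kCardShift to index the card, and the low byte of the base itself is
  // stored there (the base is biased so that byte equals the dirty-card value).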
  vixl::Label done;
  __ Cbz(value, &done);
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  __ Bind(&done);
}

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
  // Blocked core registers:
  //      lr        : Runtime reserved.
  //      tr        : Runtime reserved.
  //      xSuspend  : Runtime reserved. TODO: Unblock this when the runtime stops using it.
  //      ip1       : VIXL core temp.
  //      ip0       : VIXL core temp.
  //
  // Blocked fp registers:
  //      d31       : VIXL fp temp.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }

  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }

  if (is_baseline) {
    CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
    while (!reserved_core_baseline_registers.IsEmpty()) {
      blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
    }

    CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
    while (!reserved_fp_baseline_registers.IsEmpty()) {
      blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
    }
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant()) {
    __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
  } else if (constant->IsLongConstant()) {
    __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
  } else if (constant->IsNullConstant()) {
    __ Mov(Register(destination), 0);
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}


static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         // Null is mapped to a core W register, which we associate with kPrimInt.
         (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we chose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    Register r1 = XRegisterFrom(loc1);
    Register r2 = XRegisterFrom(loc2);
    Register tmp = temps.AcquireSameSizeAs(r1);
    __ Mov(tmp, r2);
    __ Mov(r2, r1);
    __ Mov(r1, tmp);
  } else if (is_fp_reg2 && is_fp_reg1) {
    FPRegister r1 = DRegisterFrom(loc1);
    FPRegister r2 = DRegisterFrom(loc2);
    FPRegister tmp = temps.AcquireSameSizeAs(r1);
    __ Fmov(tmp, r2);
    __ Fmov(r2, r1);
    __ Fmov(r1, tmp);
  } else if (is_slot1 != is_slot2) {
    MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
    Location reg_loc = is_slot1 ? loc2 : loc1;
    CPURegister reg, tmp;
    if (reg_loc.IsFpuRegister()) {
      reg = DRegisterFrom(reg_loc);
      tmp = temps.AcquireD();
    } else {
      reg = XRegisterFrom(reg_loc);
      tmp = temps.AcquireX();
    }
    __ Ldr(tmp, mem);
    __ Str(reg, mem);
    if (reg_loc.IsFpuRegister()) {
      __ Fmov(FPRegister(reg), FPRegister(tmp));
    } else {
      __ Mov(Register(reg), Register(tmp));
    }
  } else if (is_slot1 && is_slot2) {
    MemOperand mem1 = StackOperandFrom(loc1);
    MemOperand mem2 = StackOperandFrom(loc2);
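    // Stack-to-stack swap: load both slots into scratch registers of the
    // matching width, then store each value back into the other slot.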
    Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
    Register tmp2 = temps.AcquireSameSizeAs(tmp1);
    __ Ldr(tmp1, mem1);
    __ Ldr(tmp2, mem2);
    __ Str(tmp1, mem2);
    __ Str(tmp2, mem1);
  } else {
    LOG(FATAL) << "Unimplemented";
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
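      // There is no FP load-acquire on ARMv8, so load into a core scratch
      // register with Ldar and move the bits across with Fmov.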
      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(RequiresCurrentMethod());
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  if (instruction != nullptr) {
    RecordPcInfo(instruction, dex_pc, slow_path);
    DCHECK(instruction->IsSuspendCheck()
           || instruction->IsBoundsCheck()
           || instruction->IsNullCheck()
           || instruction->IsDivZeroCheck()
           || !IsLeafMethod());
  }
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
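  // The acquire load (or the trailing load-load barrier on the non-acquire
  // path) guarantees that reads of the class's static fields cannot be
  // reordered before the read of the status word.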
  if (use_acquire_release) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
    UNUSED(instr); \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
  } \
  void LocationsBuilderARM64::Visit##name(H##name* instr) { \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any()); \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (instr->IsAdd()) {
        __ Add(dst, lhs, rhs);
      } else if (instr->IsAnd()) {
        __ And(dst, lhs, rhs);
      } else if (instr->IsOr()) {
        __ Orr(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Sub(dst, lhs, rhs);
      } else {
        DCHECK(instr->IsXor());
        __ Eor(dst, lhs, rhs);
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FPRegister dst = OutputFPRegister(instr);
      FPRegister lhs = InputFPRegisterAt(instr, 0);
      FPRegister rhs = InputFPRegisterAt(instr, 1);
      if (instr->IsAdd()) {
        __ Fadd(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Fsub(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
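      // A constant shift amount can be encoded directly in the instruction,
      // so the second input does not need a register.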
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  Primitive::Type type = instr->GetType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (rhs.IsImmediate()) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
        if (instr->IsShl()) {
          __ Lsl(dst, lhs, shift_value);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, shift_value);
        } else {
          __ Lsr(dst, lhs, shift_value);
        }
      } else {
        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();

        if (instr->IsShl()) {
          __ Lsl(dst, lhs, rhs_reg);
        } else if (instr->IsShr()) {
          __ Asr(dst, lhs, rhs_reg);
        } else {
          __ Lsr(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  Register obj = InputRegisterAt(instruction, 0);
  Location index = locations->InAt(1);
  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
  MemOperand source = HeapOperand(obj);
  UseScratchRegisterScope temps(GetVIXLAssembler());

  if (index.IsConstant()) {
    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
    source = HeapOperand(obj, offset);
  } else {
    Register temp = temps.AcquireSameSizeAs(obj);
    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
    source = HeapOperand(temp, offset);
  }

  codegen_->Load(type, OutputCPURegister(instruction), source);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
  __ Ldr(OutputRegister(instruction),
         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool is_object = value_type == Primitive::kPrimNot;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (is_object) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    locations->SetInAt(2, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  if (value_type == Primitive::kPrimNot) {
    codegen_->InvokeRuntime(
        QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
  } else {
    LocationSummary* locations = instruction->GetLocations();
    Register obj = InputRegisterAt(instruction, 0);
    CPURegister value = InputCPURegisterAt(instruction, 2);
    Location index = locations->InAt(1);
    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
    MemOperand destination = HeapOperand(obj);
    UseScratchRegisterScope temps(GetVIXLAssembler());

    if (index.IsConstant()) {
      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
      destination = HeapOperand(obj, offset);
    } else {
      Register temp = temps.AcquireSameSizeAs(obj);
      Register index_reg = InputRegisterAt(instruction, 1);
      __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
      destination = HeapOperand(temp, offset);
    }

    codegen_->Store(value_type, value, destination);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
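  // The unsigned `hs` (higher-or-same) branch catches both index >= length
  // and index < 0 in one test, since a negative index compares as a large
  // unsigned value.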
  __ B(slow_path->GetEntryLabel(), hs);
}

void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));

  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Cbz(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(obj_cls, cls);
  __ B(ne, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}

void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  Primitive::Type in_type = compare->InputAt(0)->GetType();
  switch (in_type) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      HInstruction* right = compare->InputAt(1);
      if ((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
          (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0))) {
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      } else {
        locations->SetInAt(1, Location::RequiresFpuRegister());
      }
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  //  0 if: left == right
  //  1 if: left  > right
  // -1 if: left  < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      Register result = OutputRegister(compare);
      Register left = InputRegisterAt(compare, 0);
      Operand right = InputOperandAt(compare, 1);

      __ Cmp(left, right);
      __ Cset(result, ne);
      __ Cneg(result, result, lt);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      Register result = OutputRegister(compare);
      FPRegister left = InputFPRegisterAt(compare, 0);
      if (compare->GetLocations()->InAt(1).IsConstant()) {
        if (kIsDebugBuild) {
          HInstruction* right = compare->GetLocations()->InAt(1).GetConstant();
          DCHECK((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
                 (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0)));
        }
        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
        __ Fcmp(left, 0.0);
      } else {
        __ Fcmp(left, InputFPRegisterAt(compare, 1));
      }
      if (compare->IsGtBias()) {
        __ Cset(result, ne);
      } else {
        __ Csetm(result, ne);
      }
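      // An unordered (NaN) comparison leaves `ne` true but sets neither `mi`
      // nor `gt`, so the Cneg below does not fire: with gt bias the result
      // stays +1, and with lt bias Csetm already produced -1.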
      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
      break;
    }
    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  Register lhs = InputRegisterAt(instruction, 0);
  Operand rhs = InputOperandAt(instruction, 1);
  Register res = RegisterFrom(locations->Out(), instruction->GetType());
  Condition cond = ARM64Condition(instruction->GetCondition());

  __ Cmp(lhs, rhs);
  __ Cset(res, cond);
}

#define FOR_EACH_CONDITION_INSTRUCTION(M) \
  M(Equal) \
  M(NotEqual) \
  M(LessThan) \
  M(LessThanOrEqual) \
  M(GreaterThan) \
  M(GreaterThanOrEqual)
#define DEFINE_CONDITION_VISITORS(Name) \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION

void LocationsBuilderARM64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  Primitive::Type type = div->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

#define FOR_EACH_CONDITION_INSTRUCTION(M) \
  M(Equal)                                \
  M(NotEqual)                             \
  M(LessThan)                             \
  M(LessThanOrEqual)                      \
  M(GreaterThan)                          \
  M(GreaterThanOrEqual)
#define DEFINE_CONDITION_VISITORS(Name)                                          \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION

void LocationsBuilderARM64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
  Primitive::Type type = div->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeARM64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = Int64ConstantFrom(value);
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

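// Constant visitors only assign a ConstantLocation: no code is emitted for the
// constant itself, since users fold the value into their own operands (see
// e.g. the Fcmp against 0.0 in VisitCompare above). The same applies to the
// other constant visitors in this file.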
void LocationsBuilderARM64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
  UNUSED(exit);
}

void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
  UNUSED(constant);
  // Will be generated at use site.
}

void LocationsBuilderARM64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
  HInstruction* cond = if_instr->InputAt(0);
  HCondition* condition = cond->AsCondition();
  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
        __ B(true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = if_instr->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
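    // Equality tests against zero can use the combined compare-and-branch
    // instructions, e.g. "cbz w0, <target>" instead of "cmp w0, #0" followed
    // by "b.eq <target>".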
    Register lhs = InputRegisterAt(condition, 0);
    Operand rhs = InputOperandAt(condition, 1);
    Condition arm64_cond = ARM64Condition(condition->GetCondition());
    if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
      if (arm64_cond == eq) {
        __ Cbz(lhs, true_target);
      } else {
        __ Cbnz(lhs, true_target);
      }
    } else {
      __ Cmp(lhs, rhs);
      __ B(arm64_cond, true_target);
    }
  }
  if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
    __ B(false_target);
  }
}

void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (instruction->IsVolatile()) {
    if (use_acquire_release) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  Register obj = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (instruction->IsVolatile()) {
    if (use_acquire_release) {
      codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    } else {
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(obj, offset));
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(obj, Register(value));
  }
}

void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
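  // A final target class can be tested with a single class-pointer comparison,
  // so no runtime call is possible; otherwise a mismatch falls back to a slow
  // path that calls the type-check entrypoint.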
  LocationSummary::CallKind call_kind =
      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output does overlap inputs.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = InputRegisterAt(instruction, 0);
  Register cls = InputRegisterAt(instruction, 1);
  Register out = OutputRegister(instruction);

  vixl::Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Mov(out, 0);
  __ Cbz(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
  __ Cmp(out, cls);
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ Cset(out, eq);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeARM64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
            instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ B(ne, slow_path->GetEntryLabel());
    __ Mov(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}

void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(LocationFrom(x0));

  InvokeDexCallingConventionVisitor calling_convention_visitor;
  for (size_t i = 0; i < invoke->InputCount(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  Primitive::Type return_type = invoke->GetType();
  if (return_type != Primitive::kPrimVoid) {
    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
  }
}

void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
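  // Interface dispatch: load the receiver's class, index into its embedded
  // interface method table with the IMT index (modulo kImtSize), and call that
  // method's quick entry point. ip1 carries the method index as the hidden
  // argument expected by art_quick_imt_conflict_trampoline.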
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  Location receiver = invoke->GetLocations()->InAt(0);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // The register ip1 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
  UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
  scratch_scope.Exclude(ip1);
  __ Mov(ip1, invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, StackOperandFrom(receiver));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
  if (invoke->GetLocations()->Intrinsified()) {
    IntrinsicCodeGeneratorARM64 intrinsic(codegen);
    intrinsic.Dispatch(invoke);
    return true;
  }
  return false;
}

void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
  // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
  DCHECK(temp.Is(kArtMethodRegister));
  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
      invoke->GetDexMethodIndex() * kHeapRefSize;

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.
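  // A recursive call does not need the dex cache lookup below: it can branch
  // directly to this method's own frame entry label.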

  // temp = method;
  LoadCurrentMethod(temp);
  if (!invoke->IsRecursive()) {
    // temp = temp->dex_cache_resolved_methods_;
    __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
    // temp = temp[index_in_cache];
    __ Ldr(temp, HeapOperand(temp, index_in_cache));
    // lr = temp->entry_point_from_quick_compiled_code_;
    __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        kArm64WordSize)));
    // lr();
    __ Blr(lr);
  } else {
    __ Bl(&frame_entry_label_);
  }

  DCHECK(!IsLeafMethod());
}

void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  codegen_->GenerateStaticOrDirectCall(invoke, temp);
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  Offset class_offset = mirror::Object::ClassOffset();
  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
    __ Ldr(temp, HeapOperand(temp, class_offset));
  } else {
    DCHECK(receiver.IsRegister());
    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ Ldr(temp, HeapOperand(temp, method_offset));
  // lr = temp->GetEntryPoint();
  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
  // lr();
  __ Blr(lr);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

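// HLoadClass needs a slow path only when the class might still have to be
// resolved or initialized; loading the referrer's own class from the current
// method can never call into the runtime.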
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
                                                              : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
  Register out = OutputRegister(cls);
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  } else {
    DCHECK(cls->CanCallRuntime());
    codegen_->LoadCurrentMethod(out);
    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));

    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Cbz(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

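// The pending exception is held in a Thread-local slot; it is loaded into the
// result register and the slot is cleared (by storing wzr) in the same
// sequence.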
void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
  __ Ldr(OutputRegister(instruction), exception);
  __ Str(wzr, exception);
}

void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
  codegen_->AddSlowPath(slow_path);

  Register out = OutputRegister(load);
  codegen_->LoadCurrentMethod(out);
  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
  __ Cbz(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderARM64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

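// monitorenter and monitorexit share the same shape: the object is in the
// first runtime argument register and only the entrypoint differs.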
void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
      ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
      instruction,
      instruction->GetDexPc(),
      nullptr);
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetOut(LocationFrom(x0));
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

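// The array allocation entrypoint takes (type_idx, component_count, method).
// The component count is forced into the second argument register by the
// locations above, so only the type index and the current method are set up
// here.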
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  InvokeRuntimeCallingConvention calling_convention;
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w2));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
      instruction,
      instruction->GetDexPc(),
      nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
  DCHECK(type_index.Is(w0));
  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
  DCHECK(current_method.Is(w1));
  codegen_->LoadCurrentMethod(current_method);
  __ Mov(type_index, instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
      instruction,
      instruction->GetDexPc(),
      nullptr);
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
}

void LocationsBuilderARM64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
  switch (instruction->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

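// An implicit null check is a plain load from the object: dereferencing null
// faults, and the runtime's fault handler turns the fault into a
// NullPointerException using the recorded pc info. The explicit variant below
// tests the register and branches to a slow path instead.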
void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}

void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

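// There is no AArch64 integer remainder instruction: the integer case below is
// computed inline as dividend - (dividend / divisor) * divisor, e.g.
//   sdiv temp, dividend, divisor
//   msub result, temp, divisor, dividend
// The floating-point case has no instruction at all and calls fmodf()/fmod().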
void LocationsBuilderARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));

      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register dividend = InputRegisterAt(rem, 0);
      Register divisor = InputRegisterAt(rem, 1);
      Register output = OutputRegister(rem);
      Register temp = temps.AcquireSameSizeAs(output);

      __ Sdiv(temp, dividend, divisor);
      __ Msub(output, temp, divisor, dividend);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type return_type = instruction->InputAt(0)->GetType();
  locations->SetInAt(0, ARM64ReturnLocation(return_type));
}

void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
  instruction->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
  __ Ret();
}

void LocationsBuilderARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

void LocationsBuilderARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (instruction->IsVolatile()) {
    if (use_acquire_release) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
  }
}

void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  Register cls = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = instruction->GetFieldOffset();
  Primitive::Type field_type = instruction->GetFieldType();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (instruction->IsVolatile()) {
    if (use_acquire_release) {
      codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
    } else {
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(cls, offset));
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(cls, offset));
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(cls, Register(value));
  }
}

void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

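// A suspend check inside a loop is emitted at the back edge (see VisitGoto);
// one at the start of the method is emitted here unless the following goto
// will emit it anyway.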
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);
  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  if (Primitive::IsFloatingPointType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (Primitive::IsFloatingPointType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

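  // Integral-to-integral conversions reduce to a single bitfield extract:
  // zero-extending (ubfx) when char is involved, sign-extending (sbfx)
  // otherwise, over roughly min(input, result) bits.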
  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    int result_size = Primitive::ComponentSize(result_type);
    int input_size = Primitive::ComponentSize(input_type);
    int min_size = std::min(result_size, input_size);
    Register output = OutputRegister(conversion);
    Register source = InputRegisterAt(conversion, 0);
    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
    } else if ((result_type == Primitive::kPrimChar) ||
               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    } else {
      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace arm64
}  // namespace art