code_generator_arm64.cc revision d97dc40d186aec46bfd318b6a2026a98241d7e9c
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm64.h"
18
19#include "entrypoints/quick/quick_entrypoints.h"
20#include "entrypoints/quick/quick_entrypoints_enum.h"
21#include "gc/accounting/card_table.h"
22#include "mirror/array-inl.h"
23#include "mirror/art_method.h"
24#include "mirror/class.h"
25#include "offsets.h"
26#include "thread.h"
27#include "utils/arm64/assembler_arm64.h"
28#include "utils/assembler.h"
29#include "utils/stack_checks.h"
30
31
32using namespace vixl;   // NOLINT(build/namespaces)
33
34#ifdef __
35#error "ARM64 Codegen VIXL macro-assembler macro already defined."
36#endif
37
38
39namespace art {
40
41namespace arm64 {
42
43// TODO: Tune the use of Load-Acquire, Store-Release vs Data Memory Barriers.
44// For now we prefer the use of load-acquire, store-release over explicit memory barriers.
45static constexpr bool kUseAcquireRelease = true;
46static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
47static constexpr int kCurrentMethodStackOffset = 0;
48
49namespace {
50
51bool IsFPType(Primitive::Type type) {
52  return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
53}
54
55bool IsIntegralType(Primitive::Type type) {
56  switch (type) {
57    case Primitive::kPrimByte:
58    case Primitive::kPrimChar:
59    case Primitive::kPrimShort:
60    case Primitive::kPrimInt:
61    case Primitive::kPrimLong:
62      return true;
63    default:
64      return false;
65  }
66}
67
68bool Is64BitType(Primitive::Type type) {
69  return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
70}
71
72// Convenience helpers to ease conversion to and from VIXL operands.
73static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
74              "Unexpected values for register codes.");
75
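// ART encodes SP as 31 and XZR as 32 (see the static_assert above), whereas
// VIXL reserves dedicated codes for the stack pointer and the zero register,
// so the two helpers below translate register codes in both directions.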
76int VIXLRegCodeFromART(int code) {
77  if (code == SP) {
78    return vixl::kSPRegInternalCode;
79  }
80  if (code == XZR) {
81    return vixl::kZeroRegCode;
82  }
83  return code;
84}
85
86int ARTRegCodeFromVIXL(int code) {
87  if (code == vixl::kSPRegInternalCode) {
88    return SP;
89  }
90  if (code == vixl::kZeroRegCode) {
91    return XZR;
92  }
93  return code;
94}
95
96Register XRegisterFrom(Location location) {
97  DCHECK(location.IsRegister());
98  return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
99}
100
101Register WRegisterFrom(Location location) {
102  DCHECK(location.IsRegister());
103  return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
104}
105
106Register RegisterFrom(Location location, Primitive::Type type) {
107  DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
108  return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
109}
110
111Register OutputRegister(HInstruction* instr) {
112  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
113}
114
115Register InputRegisterAt(HInstruction* instr, int input_index) {
116  return RegisterFrom(instr->GetLocations()->InAt(input_index),
117                      instr->InputAt(input_index)->GetType());
118}
119
120FPRegister DRegisterFrom(Location location) {
121  DCHECK(location.IsFpuRegister());
122  return FPRegister::DRegFromCode(location.reg());
123}
124
125FPRegister SRegisterFrom(Location location) {
126  DCHECK(location.IsFpuRegister());
127  return FPRegister::SRegFromCode(location.reg());
128}
129
130FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
131  DCHECK(IsFPType(type));
132  return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
133}
134
135FPRegister OutputFPRegister(HInstruction* instr) {
136  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
137}
138
139FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
140  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
141                        instr->InputAt(input_index)->GetType());
142}
143
144CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
145  return IsFPType(type) ? CPURegister(FPRegisterFrom(location, type))
146                        : CPURegister(RegisterFrom(location, type));
147}
148
149CPURegister OutputCPURegister(HInstruction* instr) {
150  return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
151                                    : static_cast<CPURegister>(OutputRegister(instr));
152}
153
154CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
155  return IsFPType(instr->InputAt(index)->GetType())
156      ? static_cast<CPURegister>(InputFPRegisterAt(instr, index))
157      : static_cast<CPURegister>(InputRegisterAt(instr, index));
158}
159
160int64_t Int64ConstantFrom(Location location) {
161  HConstant* instr = location.GetConstant();
162  return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
163                                : instr->AsLongConstant()->GetValue();
164}
165
166Operand OperandFrom(Location location, Primitive::Type type) {
167  if (location.IsRegister()) {
168    return Operand(RegisterFrom(location, type));
169  } else {
170    return Operand(Int64ConstantFrom(location));
171  }
172}
173
174Operand InputOperandAt(HInstruction* instr, int input_index) {
175  return OperandFrom(instr->GetLocations()->InAt(input_index),
176                     instr->InputAt(input_index)->GetType());
177}
178
179MemOperand StackOperandFrom(Location location) {
180  return MemOperand(sp, location.GetStackIndex());
181}
182
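// Heap references are 32-bit values, so the helpers below take the W view of
// the base register; the address computation itself uses the corresponding X
// view, since addresses are 64-bit.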
183MemOperand HeapOperand(const Register& base, size_t offset = 0) {
184  // A heap reference must be 32 bits, so it fits in a W register.
185  DCHECK(base.IsW());
186  return MemOperand(base.X(), offset);
187}
188
189MemOperand HeapOperand(const Register& base, Offset offset) {
190  return HeapOperand(base, offset.SizeValue());
191}
192
193MemOperand HeapOperandFrom(Location location, Offset offset) {
194  return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
195}
196
197Location LocationFrom(const Register& reg) {
198  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
199}
200
201Location LocationFrom(const FPRegister& fpreg) {
202  return Location::FpuRegisterLocation(fpreg.code());
203}
204
205}  // namespace
206
207inline Condition ARM64Condition(IfCondition cond) {
208  switch (cond) {
209    case kCondEQ: return eq;
210    case kCondNE: return ne;
211    case kCondLT: return lt;
212    case kCondLE: return le;
213    case kCondGT: return gt;
214    case kCondGE: return ge;
215    default:
216      LOG(FATAL) << "Unknown if condition";
217  }
218  return nv;  // Unreachable.
219}
220
221Location ARM64ReturnLocation(Primitive::Type return_type) {
222  DCHECK_NE(return_type, Primitive::kPrimVoid);
223  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
224  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
225  // but we use the exact registers for clarity.
226  if (return_type == Primitive::kPrimFloat) {
227    return LocationFrom(s0);
228  } else if (return_type == Primitive::kPrimDouble) {
229    return LocationFrom(d0);
230  } else if (return_type == Primitive::kPrimLong) {
231    return LocationFrom(x0);
232  } else {
233    return LocationFrom(w0);
234  }
235}
236
237static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
238static constexpr size_t kRuntimeParameterCoreRegistersLength =
239    arraysize(kRuntimeParameterCoreRegisters);
240static const FPRegister kRuntimeParameterFpuRegisters[] = { d0, d1, d2, d3, d4, d5, d6, d7 };
241static constexpr size_t kRuntimeParameterFpuRegistersLength =
242    arraysize(kRuntimeParameterFpuRegisters);
243
244class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
245 public:
246  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
247
248  InvokeRuntimeCallingConvention()
249      : CallingConvention(kRuntimeParameterCoreRegisters,
250                          kRuntimeParameterCoreRegistersLength,
251                          kRuntimeParameterFpuRegisters,
252                          kRuntimeParameterFpuRegistersLength) {}
253
254  Location GetReturnLocation(Primitive::Type return_type);
255
256 private:
257  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
258};
259
260Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
261  return ARM64ReturnLocation(return_type);
262}
263
264#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
265#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
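// In the slow path classes below, `__` routes emitted instructions through the
// code generator's shared VIXL macro assembler, and QUICK_ENTRY_POINT(x)
// computes the Thread-relative offset of the quick runtime entrypoint `x` for
// the 64-bit pointer size.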
266
267class SlowPathCodeARM64 : public SlowPathCode {
268 public:
269  SlowPathCodeARM64() : entry_label_(), exit_label_() {}
270
271  vixl::Label* GetEntryLabel() { return &entry_label_; }
272  vixl::Label* GetExitLabel() { return &exit_label_; }
273
274 private:
275  vixl::Label entry_label_;
276  vixl::Label exit_label_;
277
278  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
279};
280
281class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
282 public:
283  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
284                           Location index_location,
285                           Location length_location)
286      : instruction_(instruction),
287        index_location_(index_location),
288        length_location_(length_location) {}
289
290
291  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
292    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
293    __ Bind(GetEntryLabel());
294    // We're moving two locations to locations that could overlap, so we need a parallel
295    // move resolver.
296    InvokeRuntimeCallingConvention calling_convention;
297    codegen->EmitParallelMoves(
298        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
299        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
300    arm64_codegen->InvokeRuntime(
301        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
302    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
303  }
304
305 private:
306  HBoundsCheck* const instruction_;
307  const Location index_location_;
308  const Location length_location_;
309
310  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
311};
312
313class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
314 public:
315  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}
316
317  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
318    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
319    __ Bind(GetEntryLabel());
320    arm64_codegen->InvokeRuntime(
321        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
322    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
323  }
324
325 private:
326  HDivZeroCheck* const instruction_;
327  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
328};
329
330class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
331 public:
332  LoadClassSlowPathARM64(HLoadClass* cls,
333                         HInstruction* at,
334                         uint32_t dex_pc,
335                         bool do_clinit)
336      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
337    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
338  }
339
340  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
341    LocationSummary* locations = at_->GetLocations();
342    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
343
344    __ Bind(GetEntryLabel());
345    codegen->SaveLiveRegisters(locations);
346
347    InvokeRuntimeCallingConvention calling_convention;
348    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
349    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
350    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
351                                            : QUICK_ENTRY_POINT(pInitializeType);
352    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
353    if (do_clinit_) {
354      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
355    } else {
356      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
357    }
358
359    // Move the class to the desired location.
360    Location out = locations->Out();
361    if (out.IsValid()) {
362      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
363      Primitive::Type type = at_->GetType();
364      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
365    }
366
367    codegen->RestoreLiveRegisters(locations);
368    __ B(GetExitLabel());
369  }
370
371 private:
372  // The class this slow path will load.
373  HLoadClass* const cls_;
374
375  // The instruction where this slow path is happening.
376  // (Might be the load class or an initialization check).
377  HInstruction* const at_;
378
379  // The dex PC of `at_`.
380  const uint32_t dex_pc_;
381
382  // Whether to initialize the class.
383  const bool do_clinit_;
384
385  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
386};
387
388class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
389 public:
390  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}
391
392  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
393    LocationSummary* locations = instruction_->GetLocations();
394    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
395    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
396
397    __ Bind(GetEntryLabel());
398    codegen->SaveLiveRegisters(locations);
399
400    InvokeRuntimeCallingConvention calling_convention;
401    arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
402    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
403    arm64_codegen->InvokeRuntime(
404        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
405    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
406    Primitive::Type type = instruction_->GetType();
407    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
408
409    codegen->RestoreLiveRegisters(locations);
410    __ B(GetExitLabel());
411  }
412
413 private:
414  HLoadString* const instruction_;
415
416  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
417};
418
419class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
420 public:
421  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
422
423  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
424    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
425    __ Bind(GetEntryLabel());
426    arm64_codegen->InvokeRuntime(
427        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
428    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
429  }
430
431 private:
432  HNullCheck* const instruction_;
433
434  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
435};
436
437class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
438 public:
439  explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
440                                     HBasicBlock* successor)
441      : instruction_(instruction), successor_(successor) {}
442
443  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
444    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
445    __ Bind(GetEntryLabel());
446    codegen->SaveLiveRegisters(instruction_->GetLocations());
447    arm64_codegen->InvokeRuntime(
448        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
449    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
450    codegen->RestoreLiveRegisters(instruction_->GetLocations());
451    if (successor_ == nullptr) {
452      __ B(GetReturnLabel());
453    } else {
454      __ B(arm64_codegen->GetLabelOf(successor_));
455    }
456  }
457
458  vixl::Label* GetReturnLabel() {
459    DCHECK(successor_ == nullptr);
460    return &return_label_;
461  }
462
463 private:
464  HSuspendCheck* const instruction_;
465  // If not null, the block to branch to after the suspend check.
466  HBasicBlock* const successor_;
467
468  // If `successor_` is null, the label to branch to after the suspend check.
469  vixl::Label return_label_;
470
471  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
472};
473
474class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
475 public:
476  TypeCheckSlowPathARM64(HInstruction* instruction,
477                         Location class_to_check,
478                         Location object_class,
479                         uint32_t dex_pc)
480      : instruction_(instruction),
481        class_to_check_(class_to_check),
482        object_class_(object_class),
483        dex_pc_(dex_pc) {}
484
485  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
486    LocationSummary* locations = instruction_->GetLocations();
487    DCHECK(instruction_->IsCheckCast()
488           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
489    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
490
491    __ Bind(GetEntryLabel());
492    codegen->SaveLiveRegisters(locations);
493
494    // We're moving two locations to locations that could overlap, so we need a parallel
495    // move resolver.
496    InvokeRuntimeCallingConvention calling_convention;
497    codegen->EmitParallelMoves(
498        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
499        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));
500
501    if (instruction_->IsInstanceOf()) {
502      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
503      Primitive::Type ret_type = instruction_->GetType();
504      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
505      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
506      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
507                           const mirror::Class*, const mirror::Class*>();
508    } else {
509      DCHECK(instruction_->IsCheckCast());
510      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
511      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
512    }
513
514    codegen->RestoreLiveRegisters(locations);
515    __ B(GetExitLabel());
516  }
517
518 private:
519  HInstruction* const instruction_;
520  const Location class_to_check_;
521  const Location object_class_;
522  uint32_t dex_pc_;
523
524  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
525};
526
527#undef __
528
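// Argument assignment for the managed (dex) calling convention. As a sketch:
// for a method taking (int, long, float), the int and long are assigned the
// first two core argument registers, the float the first FP argument register,
// and stack_index_ still advances by 1, 2 and 1 slots respectively, since
// stack space is reserved for every argument regardless of where it lands.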
529Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
530  Location next_location;
531  if (type == Primitive::kPrimVoid) {
532    LOG(FATAL) << "Unreachable type " << type;
533  }
534
535  if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
536    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
537  } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
538    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
539  } else {
540    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
541    next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
542                                      : Location::StackSlot(stack_offset);
543  }
544
545  // Space on the stack is reserved for all arguments.
546  stack_index_ += Is64BitType(type) ? 2 : 1;
547  return next_location;
548}
549
550CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
551    : CodeGenerator(graph,
552                    kNumberOfAllocatableRegisters,
553                    kNumberOfAllocatableFPRegisters,
554                    kNumberOfAllocatableRegisterPairs,
555                    (1 << LR),
556                    0,
557                    compiler_options),
558      block_labels_(nullptr),
559      location_builder_(graph, this),
560      instruction_visitor_(graph, this),
561      move_resolver_(graph->GetArena(), this) {
562  // Save the link register (containing the return address) to mimic Quick.
563  AddAllocatedRegister(Location::RegisterLocation(LR));
564}
565
566#undef __
567#define __ GetVIXLAssembler()->
568
569void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
570  // Ensure we emit the literal pool.
571  __ FinalizeCode();
572  CodeGenerator::Finalize(allocator);
573}
574
575void ParallelMoveResolverARM64::EmitMove(size_t index) {
576  MoveOperands* move = moves_.Get(index);
577  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
578}
579
580void ParallelMoveResolverARM64::EmitSwap(size_t index) {
581  MoveOperands* move = moves_.Get(index);
582  codegen_->SwapLocations(move->GetDestination(), move->GetSource());
583}
584
585void ParallelMoveResolverARM64::RestoreScratch(int reg) {
586  __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
587}
588
589void ParallelMoveResolverARM64::SpillScratch(int reg) {
590  __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
591}
592
593void CodeGeneratorARM64::GenerateFrameEntry() {
594  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
595  if (do_overflow_check) {
596    UseScratchRegisterScope temps(GetVIXLAssembler());
597    Register temp = temps.AcquireX();
598    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
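    // Implicit stack overflow probe: load from the lowest address this frame
    // may touch. If that address falls in the stack guard page, the resulting
    // fault is expected to be turned into a StackOverflowError by the
    // runtime's fault handler, and RecordPcInfo below maps the faulting PC
    // back to this method.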
599    __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
600    __ Ldr(wzr, MemOperand(temp, 0));
601    RecordPcInfo(nullptr, 0);
602  }
603
604  int frame_size = GetFrameSize();
605  __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
606  __ PokeCPURegList(GetFramePreservedRegisters(), frame_size - FrameEntrySpillSize());
607
608  // Stack layout:
609  // sp[frame_size - 8]        : lr.
610  // ...                       : other preserved registers.
611  // sp[frame_size - regs_size]: first preserved register.
612  // ...                       : reserved frame space.
613  // sp[0]                     : current method.
614}
615
616void CodeGeneratorARM64::GenerateFrameExit() {
617  int frame_size = GetFrameSize();
618  __ PeekCPURegList(GetFramePreservedRegisters(), frame_size - FrameEntrySpillSize());
619  __ Drop(frame_size);
620}
621
622void CodeGeneratorARM64::Bind(HBasicBlock* block) {
623  __ Bind(GetLabelOf(block));
624}
625
626void CodeGeneratorARM64::Move(HInstruction* instruction,
627                              Location location,
628                              HInstruction* move_for) {
629  LocationSummary* locations = instruction->GetLocations();
630  if (locations != nullptr && locations->Out().Equals(location)) {
631    return;
632  }
633
634  Primitive::Type type = instruction->GetType();
635  DCHECK_NE(type, Primitive::kPrimVoid);
636
637  if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
638    int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
639                                                 : instruction->AsLongConstant()->GetValue();
640    if (location.IsRegister()) {
641      Register dst = RegisterFrom(location, type);
642      DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
643             (instruction->IsLongConstant() && dst.Is64Bits()));
644      __ Mov(dst, value);
645    } else {
646      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
647      UseScratchRegisterScope temps(GetVIXLAssembler());
648      Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
649      __ Mov(temp, value);
650      __ Str(temp, StackOperandFrom(location));
651    }
652  } else if (instruction->IsTemporary()) {
653    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
654    MoveLocation(location, temp_location, type);
655  } else if (instruction->IsLoadLocal()) {
656    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
657    if (Is64BitType(type)) {
658      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
659    } else {
660      MoveLocation(location, Location::StackSlot(stack_slot), type);
661    }
662
663  } else {
664    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
665    MoveLocation(location, locations->Out(), type);
666  }
667}
668
669Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
670  Primitive::Type type = load->GetType();
671
672  switch (type) {
673    case Primitive::kPrimNot:
674    case Primitive::kPrimInt:
675    case Primitive::kPrimFloat:
676      return Location::StackSlot(GetStackSlot(load->GetLocal()));
677
678    case Primitive::kPrimLong:
679    case Primitive::kPrimDouble:
680      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
681
682    case Primitive::kPrimBoolean:
683    case Primitive::kPrimByte:
684    case Primitive::kPrimChar:
685    case Primitive::kPrimShort:
686    case Primitive::kPrimVoid:
687      LOG(FATAL) << "Unexpected type " << type;
688  }
689
690  LOG(FATAL) << "Unreachable";
691  return Location::NoLocation();
692}
693
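// Write barrier card marking (sketch of the scheme assumed here): if the
// stored value is null the barrier is skipped; otherwise the card at
// `card_table_base + (object >> kCardShift)` is dirtied by storing the low
// byte of the card table base, which ART arranges to equal the dirty value.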
694void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
695  UseScratchRegisterScope temps(GetVIXLAssembler());
696  Register card = temps.AcquireX();
697  Register temp = temps.AcquireW();   // Index within the CardTable - 32bit.
698  vixl::Label done;
699  __ Cbz(value, &done);
700  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
701  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
702  __ Strb(card, MemOperand(card, temp.X()));
703  __ Bind(&done);
704}
705
706void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
707  // Block reserved registers:
708  //   ip0 (VIXL temporary)
709  //   ip1 (VIXL temporary)
710  //   tr
711  //   lr
712  // sp is not part of the allocatable registers, so we don't need to block it.
713  // TODO: Avoid blocking callee-saved registers, and instead preserve them
714  // where necessary.
715  CPURegList reserved_core_registers = vixl_reserved_core_registers;
716  reserved_core_registers.Combine(runtime_reserved_core_registers);
717  reserved_core_registers.Combine(quick_callee_saved_registers);
718  while (!reserved_core_registers.IsEmpty()) {
719    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
720  }
721  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
722  reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
723  while (!reserved_fp_registers.IsEmpty()) {
724    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
725  }
726}
727
728Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
729  if (type == Primitive::kPrimVoid) {
730    LOG(FATAL) << "Unreachable type " << type;
731  }
732
733  if (IsFPType(type)) {
734    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
735    DCHECK_NE(reg, -1);
736    return Location::FpuRegisterLocation(reg);
737  } else {
738    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
739    DCHECK_NE(reg, -1);
740    return Location::RegisterLocation(reg);
741  }
742}
743
744size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
745  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
746  __ Str(reg, MemOperand(sp, stack_index));
747  return kArm64WordSize;
748}
749
750size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
751  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
752  __ Ldr(reg, MemOperand(sp, stack_index));
753  return kArm64WordSize;
754}
755
756size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
757  FPRegister reg = FPRegister(reg_id, kDRegSize);
758  __ Str(reg, MemOperand(sp, stack_index));
759  return kArm64WordSize;
760}
761
762size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
763  FPRegister reg = FPRegister(reg_id, kDRegSize);
764  __ Ldr(reg, MemOperand(sp, stack_index));
765  return kArm64WordSize;
766}
767
768void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
769  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
770}
771
772void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
773  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
774}
775
776void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
777  if (constant->IsIntConstant() || constant->IsLongConstant()) {
778    __ Mov(Register(destination),
779           constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
780                                     : constant->AsLongConstant()->GetValue());
781  } else if (constant->IsFloatConstant()) {
782    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
783  } else {
784    DCHECK(constant->IsDoubleConstant());
785    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
786  }
787}
788
789
790static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
791  DCHECK(constant.IsConstant());
792  HConstant* cst = constant.GetConstant();
793  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
794         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
795         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
796         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
797}
798
799void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
800  if (source.Equals(destination)) {
801    return;
802  }
803
804  // A valid move can always be inferred from the destination and source
805  // locations. When moving from and to a register, the argument type can be
806  // used to generate 32-bit instead of 64-bit moves. In debug mode we also
807  // check the coherency of the locations and the type.
808  bool unspecified_type = (type == Primitive::kPrimVoid);
809
810  if (destination.IsRegister() || destination.IsFpuRegister()) {
811    if (unspecified_type) {
812      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
813      if (source.IsStackSlot() ||
814          (src_cst != nullptr && (src_cst->IsIntConstant() || src_cst->IsFloatConstant()))) {
815        // For stack slots and 32-bit constants, a 32-bit type is appropriate.
816        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
817      } else {
818        // If the source is a double stack slot or a 64-bit constant, a 64-bit
819        // type is appropriate. Otherwise the source is a register, and since the
820        // type has not been specified, we choose a 64-bit type to force a 64-bit
821        // move.
822        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
823      }
824    }
825    DCHECK((destination.IsFpuRegister() && IsFPType(type)) ||
826           (destination.IsRegister() && !IsFPType(type)));
827    CPURegister dst = CPURegisterFrom(destination, type);
828    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
829      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
830      __ Ldr(dst, StackOperandFrom(source));
831    } else if (source.IsConstant()) {
832      DCHECK(CoherentConstantAndType(source, type));
833      MoveConstant(dst, source.GetConstant());
834    } else {
835      if (destination.IsRegister()) {
836        __ Mov(Register(dst), RegisterFrom(source, type));
837      } else {
838        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
839      }
840    }
841
842  } else {  // The destination is not a register. It must be a stack slot.
843    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
844    if (source.IsRegister() || source.IsFpuRegister()) {
845      if (unspecified_type) {
846        if (source.IsRegister()) {
847          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
848        } else {
849          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
850        }
851      }
852      DCHECK((destination.IsDoubleStackSlot() == Is64BitType(type)) &&
853             (source.IsFpuRegister() == IsFPType(type)));
854      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
855    } else if (source.IsConstant()) {
856      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
857      UseScratchRegisterScope temps(GetVIXLAssembler());
858      HConstant* src_cst = source.GetConstant();
859      CPURegister temp;
860      if (src_cst->IsIntConstant()) {
861        temp = temps.AcquireW();
862      } else if (src_cst->IsLongConstant()) {
863        temp = temps.AcquireX();
864      } else if (src_cst->IsFloatConstant()) {
865        temp = temps.AcquireS();
866      } else {
867        DCHECK(src_cst->IsDoubleConstant());
868        temp = temps.AcquireD();
869      }
870      MoveConstant(temp, src_cst);
871      __ Str(temp, StackOperandFrom(destination));
872    } else {
873      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
874      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
875      UseScratchRegisterScope temps(GetVIXLAssembler());
876      // There is generally less pressure on FP registers.
877      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
878      __ Ldr(temp, StackOperandFrom(source));
879      __ Str(temp, StackOperandFrom(destination));
880    }
881  }
882}
883
884void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
885  DCHECK(!loc1.IsConstant());
886  DCHECK(!loc2.IsConstant());
887
888  if (loc1.Equals(loc2)) {
889    return;
890  }
891
892  UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
893
894  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
895  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
896  bool is_fp_reg1 = loc1.IsFpuRegister();
897  bool is_fp_reg2 = loc2.IsFpuRegister();
898
899  if (loc2.IsRegister() && loc1.IsRegister()) {
900    Register r1 = XRegisterFrom(loc1);
901    Register r2 = XRegisterFrom(loc2);
902    Register tmp = temps.AcquireSameSizeAs(r1);
903    __ Mov(tmp, r2);
904    __ Mov(r2, r1);
905    __ Mov(r1, tmp);
906  } else if (is_fp_reg2 && is_fp_reg1) {
907    FPRegister r1 = DRegisterFrom(loc1);
908    FPRegister r2 = DRegisterFrom(loc2);
909    FPRegister tmp = temps.AcquireSameSizeAs(r1);
910    __ Fmov(tmp, r2);
911    __ Fmov(r2, r1);
912    __ Fmov(r1, tmp);
913  } else if (is_slot1 != is_slot2) {
914    MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
915    Location reg_loc = is_slot1 ? loc2 : loc1;
916    CPURegister reg, tmp;
917    if (reg_loc.IsFpuRegister()) {
918      reg = DRegisterFrom(reg_loc);
919      tmp = temps.AcquireD();
920    } else {
921      reg = XRegisterFrom(reg_loc);
922      tmp = temps.AcquireX();
923    }
924    __ Ldr(tmp, mem);
925    __ Str(reg, mem);
926    if (reg_loc.IsFpuRegister()) {
927      __ Fmov(FPRegister(reg), FPRegister(tmp));
928    } else {
929      __ Mov(Register(reg), Register(tmp));
930    }
931  } else if (is_slot1 && is_slot2) {
932    MemOperand mem1 = StackOperandFrom(loc1);
933    MemOperand mem2 = StackOperandFrom(loc2);
934    Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
935    Register tmp2 = temps.AcquireSameSizeAs(tmp1);
936    __ Ldr(tmp1, mem1);
937    __ Ldr(tmp2, mem2);
938    __ Str(tmp1, mem2);
939    __ Str(tmp2, mem1);
940  } else {
941    LOG(FATAL) << "Unimplemented";
942  }
943}
944
945void CodeGeneratorARM64::Load(Primitive::Type type,
946                              CPURegister dst,
947                              const MemOperand& src) {
948  switch (type) {
949    case Primitive::kPrimBoolean:
950      __ Ldrb(Register(dst), src);
951      break;
952    case Primitive::kPrimByte:
953      __ Ldrsb(Register(dst), src);
954      break;
955    case Primitive::kPrimShort:
956      __ Ldrsh(Register(dst), src);
957      break;
958    case Primitive::kPrimChar:
959      __ Ldrh(Register(dst), src);
960      break;
961    case Primitive::kPrimInt:
962    case Primitive::kPrimNot:
963    case Primitive::kPrimLong:
964    case Primitive::kPrimFloat:
965    case Primitive::kPrimDouble:
966      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
967      __ Ldr(dst, src);
968      break;
969    case Primitive::kPrimVoid:
970      LOG(FATAL) << "Unreachable type " << type;
971  }
972}
973
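// Acquire loads: ARMv8 provides LDAR/LDARH/LDARB but no sign-extending
// acquire forms, so byte and short values are loaded zero-extended and then
// sign-extended with SBFX. FP values are loaded through a core register and
// moved across with FMOV, as there is no FP load-acquire instruction.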
974void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
975                                     CPURegister dst,
976                                     const MemOperand& src) {
977  UseScratchRegisterScope temps(GetVIXLAssembler());
978  Register temp_base = temps.AcquireX();
979  Primitive::Type type = instruction->GetType();
980
981  DCHECK(!src.IsRegisterOffset());
982  DCHECK(!src.IsPreIndex());
983  DCHECK(!src.IsPostIndex());
984
985  // TODO(vixl): Let the MacroAssembler handle MemOperand.
986  __ Add(temp_base, src.base(), src.offset());
987  MemOperand base = MemOperand(temp_base);
988  switch (type) {
989    case Primitive::kPrimBoolean:
990      __ Ldarb(Register(dst), base);
991      MaybeRecordImplicitNullCheck(instruction);
992      break;
993    case Primitive::kPrimByte:
994      __ Ldarb(Register(dst), base);
995      MaybeRecordImplicitNullCheck(instruction);
996      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
997      break;
998    case Primitive::kPrimChar:
999      __ Ldarh(Register(dst), base);
1000      MaybeRecordImplicitNullCheck(instruction);
1001      break;
1002    case Primitive::kPrimShort:
1003      __ Ldarh(Register(dst), base);
1004      MaybeRecordImplicitNullCheck(instruction);
1005      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
1006      break;
1007    case Primitive::kPrimInt:
1008    case Primitive::kPrimNot:
1009    case Primitive::kPrimLong:
1010      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
1011      __ Ldar(Register(dst), base);
1012      MaybeRecordImplicitNullCheck(instruction);
1013      break;
1014    case Primitive::kPrimFloat:
1015    case Primitive::kPrimDouble: {
1016      DCHECK(dst.IsFPRegister());
1017      DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
1018
1019      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
1020      __ Ldar(temp, base);
1021      MaybeRecordImplicitNullCheck(instruction);
1022      __ Fmov(FPRegister(dst), temp);
1023      break;
1024    }
1025    case Primitive::kPrimVoid:
1026      LOG(FATAL) << "Unreachable type " << type;
1027  }
1028}
1029
1030void CodeGeneratorARM64::Store(Primitive::Type type,
1031                               CPURegister src,
1032                               const MemOperand& dst) {
1033  switch (type) {
1034    case Primitive::kPrimBoolean:
1035    case Primitive::kPrimByte:
1036      __ Strb(Register(src), dst);
1037      break;
1038    case Primitive::kPrimChar:
1039    case Primitive::kPrimShort:
1040      __ Strh(Register(src), dst);
1041      break;
1042    case Primitive::kPrimInt:
1043    case Primitive::kPrimNot:
1044    case Primitive::kPrimLong:
1045    case Primitive::kPrimFloat:
1046    case Primitive::kPrimDouble:
1047      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
1048      __ Str(src, dst);
1049      break;
1050    case Primitive::kPrimVoid:
1051      LOG(FATAL) << "Unreachable type " << type;
1052  }
1053}
1054
1055void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
1056                                      CPURegister src,
1057                                      const MemOperand& dst) {
1058  UseScratchRegisterScope temps(GetVIXLAssembler());
1059  Register temp_base = temps.AcquireX();
1060
1061  DCHECK(!dst.IsRegisterOffset());
1062  DCHECK(!dst.IsPreIndex());
1063  DCHECK(!dst.IsPostIndex());
1064
1065  // TODO(vixl): Let the MacroAssembler handle this.
1066  __ Add(temp_base, dst.base(), dst.offset());
1067  MemOperand base = MemOperand(temp_base);
1068  switch (type) {
1069    case Primitive::kPrimBoolean:
1070    case Primitive::kPrimByte:
1071      __ Stlrb(Register(src), base);
1072      break;
1073    case Primitive::kPrimChar:
1074    case Primitive::kPrimShort:
1075      __ Stlrh(Register(src), base);
1076      break;
1077    case Primitive::kPrimInt:
1078    case Primitive::kPrimNot:
1079    case Primitive::kPrimLong:
1080      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
1081      __ Stlr(Register(src), base);
1082      break;
1083    case Primitive::kPrimFloat:
1084    case Primitive::kPrimDouble: {
1085      DCHECK(src.IsFPRegister());
1086      DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
1087
1088      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
1089      __ Fmov(temp, FPRegister(src));
1090      __ Stlr(temp, base);
1091      break;
1092    }
1093    case Primitive::kPrimVoid:
1094      LOG(FATAL) << "Unreachable type " << type;
1095  }
1096}
1097
1098void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
1099  DCHECK(current_method.IsW());
1100  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
1101}
1102
1103void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
1104                                       HInstruction* instruction,
1105                                       uint32_t dex_pc) {
1106  __ Ldr(lr, MemOperand(tr, entry_point_offset));
1107  __ Blr(lr);
1108  if (instruction != nullptr) {
1109    RecordPcInfo(instruction, dex_pc);
1110    DCHECK(instruction->IsSuspendCheck()
1111        || instruction->IsBoundsCheck()
1112        || instruction->IsNullCheck()
1113        || instruction->IsDivZeroCheck()
1114        || !IsLeafMethod());
1115  }
1116}
1117
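// Class initialization check (sketch): load the mirror::Class status word and
// take the slow path when it is below kStatusInitialized. The acquire load
// (or the BarrierReads DMB on the non-acquire path) is what makes the fields
// written by the class initializer visible to this thread.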
1118void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
1119                                                                     vixl::Register class_reg) {
1120  UseScratchRegisterScope temps(GetVIXLAssembler());
1121  Register temp = temps.AcquireW();
1122  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
1123
1124  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
1125  if (kUseAcquireRelease) {
1126    // TODO(vixl): Let the MacroAssembler handle MemOperand.
1127    __ Add(temp, class_reg, status_offset);
1128    __ Ldar(temp, HeapOperand(temp));
1129    __ Cmp(temp, mirror::Class::kStatusInitialized);
1130    __ B(lt, slow_path->GetEntryLabel());
1131  } else {
1132    __ Ldr(temp, HeapOperand(class_reg, status_offset));
1133    __ Cmp(temp, mirror::Class::kStatusInitialized);
1134    __ B(lt, slow_path->GetEntryLabel());
1135    __ Dmb(InnerShareable, BarrierReads);
1136  }
1137  __ Bind(slow_path->GetExitLabel());
1138}
1139
1140void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
1141  BarrierType type = BarrierAll;
1142
1143  switch (kind) {
1144    case MemBarrierKind::kAnyAny:
1145    case MemBarrierKind::kAnyStore: {
1146      type = BarrierAll;
1147      break;
1148    }
1149    case MemBarrierKind::kLoadAny: {
1150      type = BarrierReads;
1151      break;
1152    }
1153    case MemBarrierKind::kStoreStore: {
1154      type = BarrierWrites;
1155      break;
1156    }
1157    default:
1158      LOG(FATAL) << "Unexpected memory barrier " << kind;
1159  }
1160  __ Dmb(InnerShareable, type);
1161}
1162
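// Suspend check (sketch): poll the 16-bit thread flags from the Thread
// object. Any non-zero flag diverts to the slow path, which calls the
// pTestSuspend entrypoint and then resumes either at the return label or at
// the explicit successor block.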
1163void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
1164                                                         HBasicBlock* successor) {
1165  SuspendCheckSlowPathARM64* slow_path =
1166    new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
1167  codegen_->AddSlowPath(slow_path);
1168  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
1169  Register temp = temps.AcquireW();
1170
1171  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
1172  if (successor == nullptr) {
1173    __ Cbnz(temp, slow_path->GetEntryLabel());
1174    __ Bind(slow_path->GetReturnLabel());
1175  } else {
1176    __ Cbz(temp, codegen_->GetLabelOf(successor));
1177    __ B(slow_path->GetEntryLabel());
1178    // slow_path will return to GetLabelOf(successor).
1179  }
1180}
1181
1182InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
1183                                                             CodeGeneratorARM64* codegen)
1184      : HGraphVisitor(graph),
1185        assembler_(codegen->GetAssembler()),
1186        codegen_(codegen) {}
1187
1188#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
1189  /* No unimplemented IR. */
1190
1191#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
1192
1193enum UnimplementedInstructionBreakCode {
1194  // Using a base helps identify when we hit such breakpoints.
1195  UnimplementedInstructionBreakCodeBaseCode = 0x900,
1196#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
1197  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
1198#undef ENUM_UNIMPLEMENTED_INSTRUCTION
1199};
1200
1201#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
1202  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
1203    UNUSED(instr);                                                                    \
1204    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
1205  }                                                                                   \
1206  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
1207    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
1208    locations->SetOut(Location::Any());                                               \
1209  }
1210  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
1211#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
1212
1213#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
1214#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
1215
1216void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
1217  DCHECK_EQ(instr->InputCount(), 2U);
1218  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1219  Primitive::Type type = instr->GetResultType();
1220  switch (type) {
1221    case Primitive::kPrimInt:
1222    case Primitive::kPrimLong:
1223      locations->SetInAt(0, Location::RequiresRegister());
1224      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1225      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1226      break;
1227
1228    case Primitive::kPrimFloat:
1229    case Primitive::kPrimDouble:
1230      locations->SetInAt(0, Location::RequiresFpuRegister());
1231      locations->SetInAt(1, Location::RequiresFpuRegister());
1232      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1233      break;
1234
1235    default:
1236      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
1237  }
1238}
1239
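// Note on immediates: for integer operations the second input may be a
// constant. When it does not fit the instruction's immediate encoding, the
// VIXL macro assembler is relied on to materialize it in a scratch register
// first (an assumption about VIXL, not something enforced here).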
1240void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
1241  Primitive::Type type = instr->GetType();
1242
1243  switch (type) {
1244    case Primitive::kPrimInt:
1245    case Primitive::kPrimLong: {
1246      Register dst = OutputRegister(instr);
1247      Register lhs = InputRegisterAt(instr, 0);
1248      Operand rhs = InputOperandAt(instr, 1);
1249      if (instr->IsAdd()) {
1250        __ Add(dst, lhs, rhs);
1251      } else if (instr->IsAnd()) {
1252        __ And(dst, lhs, rhs);
1253      } else if (instr->IsOr()) {
1254        __ Orr(dst, lhs, rhs);
1255      } else if (instr->IsSub()) {
1256        __ Sub(dst, lhs, rhs);
1257      } else {
1258        DCHECK(instr->IsXor());
1259        __ Eor(dst, lhs, rhs);
1260      }
1261      break;
1262    }
1263    case Primitive::kPrimFloat:
1264    case Primitive::kPrimDouble: {
1265      FPRegister dst = OutputFPRegister(instr);
1266      FPRegister lhs = InputFPRegisterAt(instr, 0);
1267      FPRegister rhs = InputFPRegisterAt(instr, 1);
1268      if (instr->IsAdd()) {
1269        __ Fadd(dst, lhs, rhs);
1270      } else if (instr->IsSub()) {
1271        __ Fsub(dst, lhs, rhs);
1272      } else {
1273        LOG(FATAL) << "Unexpected floating-point binary operation";
1274      }
1275      break;
1276    }
1277    default:
1278      LOG(FATAL) << "Unexpected binary operation type " << type;
1279  }
1280}
1281
1282void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
1283  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1284
1285  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1286  Primitive::Type type = instr->GetResultType();
1287  switch (type) {
1288    case Primitive::kPrimInt:
1289    case Primitive::kPrimLong: {
1290      locations->SetInAt(0, Location::RequiresRegister());
1291      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1292      locations->SetOut(Location::RequiresRegister());
1293      break;
1294    }
1295    default:
1296      LOG(FATAL) << "Unexpected shift type " << type;
1297  }
1298}
1299
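// Shift distances are masked with kMaxIntShiftValue / kMaxLongShiftValue
// (expected to be 31 and 63), matching the Java rule that only the low five
// or six bits of the shift distance are used.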
1300void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
1301  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1302
1303  Primitive::Type type = instr->GetType();
1304  switch (type) {
1305    case Primitive::kPrimInt:
1306    case Primitive::kPrimLong: {
1307      Register dst = OutputRegister(instr);
1308      Register lhs = InputRegisterAt(instr, 0);
1309      Operand rhs = InputOperandAt(instr, 1);
1310      if (rhs.IsImmediate()) {
1311        uint32_t shift_value = (type == Primitive::kPrimInt)
1312          ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
1313          : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
1314        if (instr->IsShl()) {
1315          __ Lsl(dst, lhs, shift_value);
1316        } else if (instr->IsShr()) {
1317          __ Asr(dst, lhs, shift_value);
1318        } else {
1319          __ Lsr(dst, lhs, shift_value);
1320        }
1321      } else {
1322        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1323
1324        if (instr->IsShl()) {
1325          __ Lsl(dst, lhs, rhs_reg);
1326        } else if (instr->IsShr()) {
1327          __ Asr(dst, lhs, rhs_reg);
1328        } else {
1329          __ Lsr(dst, lhs, rhs_reg);
1330        }
1331      }
1332      break;
1333    }
1334    default:
1335      LOG(FATAL) << "Unexpected shift operation type " << type;
1336  }
1337}
1338
1339void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1340  HandleBinaryOp(instruction);
1341}
1342
1343void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1344  HandleBinaryOp(instruction);
1345}
1346
1347void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1348  HandleBinaryOp(instruction);
1349}
1350
1351void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1352  HandleBinaryOp(instruction);
1353}
1354
1355void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
1356  LocationSummary* locations =
1357      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1358  locations->SetInAt(0, Location::RequiresRegister());
1359  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1360  locations->SetOut(Location::RequiresRegister());
1361}
1362
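// Array element addressing (sketch): the element lives at
// `data_offset + (index << ComponentSizeShift(type))` within the array object.
// A constant index is folded into the immediate offset; otherwise a scratch
// register holds `obj + scaled_index` and the data offset is applied on top.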
1363void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
1364  LocationSummary* locations = instruction->GetLocations();
1365  Primitive::Type type = instruction->GetType();
1366  Register obj = InputRegisterAt(instruction, 0);
1367  Location index = locations->InAt(1);
1368  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
1369  MemOperand source = HeapOperand(obj);
1370  UseScratchRegisterScope temps(GetVIXLAssembler());
1371
1372  if (index.IsConstant()) {
1373    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
1374    source = HeapOperand(obj, offset);
1375  } else {
1376    Register temp = temps.AcquireSameSizeAs(obj);
1377    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
1378    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
1379    source = HeapOperand(temp, offset);
1380  }
1381
1382  codegen_->Load(type, OutputCPURegister(instruction), source);
1383  codegen_->MaybeRecordImplicitNullCheck(instruction);
1384}
1385
1386void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
1387  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1388  locations->SetInAt(0, Location::RequiresRegister());
1389  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1390}
1391
1392void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
1393  __ Ldr(OutputRegister(instruction),
1394         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
1395  codegen_->MaybeRecordImplicitNullCheck(instruction);
1396}
1397
1398void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
1399  Primitive::Type value_type = instruction->GetComponentType();
1400  bool is_object = value_type == Primitive::kPrimNot;
1401  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1402      instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
1403  if (is_object) {
1404    InvokeRuntimeCallingConvention calling_convention;
1405    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
1406    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
1407    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
1408  } else {
1409    locations->SetInAt(0, Location::RequiresRegister());
1410    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1411    locations->SetInAt(2, Location::RequiresRegister());
1412  }
1413}
1414
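// Reference stores are routed through the pAputObject entrypoint, which is
// expected to perform the array store type check and the GC write barrier;
// primitive element stores are emitted inline.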
1415void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
1416  Primitive::Type value_type = instruction->GetComponentType();
1417  if (value_type == Primitive::kPrimNot) {
1418    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
1419    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
1420  } else {
1421    LocationSummary* locations = instruction->GetLocations();
1422    Register obj = InputRegisterAt(instruction, 0);
1423    CPURegister value = InputCPURegisterAt(instruction, 2);
1424    Location index = locations->InAt(1);
1425    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
1426    MemOperand destination = HeapOperand(obj);
1427    UseScratchRegisterScope temps(GetVIXLAssembler());
1428
1429    if (index.IsConstant()) {
1430      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1431      destination = HeapOperand(obj, offset);
1432    } else {
1433      Register temp = temps.AcquireSameSizeAs(obj);
1434      Register index_reg = InputRegisterAt(instruction, 1);
1435      __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
1436      destination = HeapOperand(temp, offset);
1437    }
1438
1439    codegen_->Store(value_type, value, destination);
1440    codegen_->MaybeRecordImplicitNullCheck(instruction);
1441  }
1442}
1443
1444void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1445  LocationSummary* locations =
1446      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1447  locations->SetInAt(0, Location::RequiresRegister());
1448  locations->SetInAt(1, Location::RequiresRegister());
1449  if (instruction->HasUses()) {
1450    locations->SetOut(Location::SameAsFirstInput());
1451  }
1452}
1453
1454void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1455  LocationSummary* locations = instruction->GetLocations();
1456  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
1457      instruction, locations->InAt(0), locations->InAt(1));
1458  codegen_->AddSlowPath(slow_path);
1459
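  // cmp index, length; b.hs slow_path. The unsigned comparison also catches a
  // negative index, which wraps around to a large unsigned value.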
1460  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
1461  __ B(slow_path->GetEntryLabel(), hs);
1462}
1463
1464void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
1465  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1466      instruction, LocationSummary::kCallOnSlowPath);
1467  locations->SetInAt(0, Location::RequiresRegister());
1468  locations->SetInAt(1, Location::RequiresRegister());
1469  locations->AddTemp(Location::RequiresRegister());
1470}
1471
1472void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
1473  LocationSummary* locations = instruction->GetLocations();
1474  Register obj = InputRegisterAt(instruction, 0);
1475  Register cls = InputRegisterAt(instruction, 1);
1476  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
1477
1478  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
1479      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
1480  codegen_->AddSlowPath(slow_path);
1481
1482  // TODO: avoid this check if we know obj is not null.
1483  __ Cbz(obj, slow_path->GetExitLabel());
1484  // Compare the class of `obj` with `cls`.
1485  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
1486  __ Cmp(obj_cls, cls);
1487  __ B(ne, slow_path->GetEntryLabel());
1488  __ Bind(slow_path->GetExitLabel());
1489}
1490
1491void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
1492  LocationSummary* locations =
1493      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1494  locations->SetInAt(0, Location::RequiresRegister());
1495  if (check->HasUses()) {
1496    locations->SetOut(Location::SameAsFirstInput());
1497  }
1498}
1499
1500void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
1501  // We assume the class is not null.
1502  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
1503      check->GetLoadClass(), check, check->GetDexPc(), true);
1504  codegen_->AddSlowPath(slow_path);
1505  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
1506}
1507
1508void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
1509  LocationSummary* locations =
1510      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
1511  Primitive::Type in_type = compare->InputAt(0)->GetType();
1512  switch (in_type) {
1513    case Primitive::kPrimLong: {
1514      locations->SetInAt(0, Location::RequiresRegister());
1515      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
1516      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1517      break;
1518    }
1519    case Primitive::kPrimFloat:
1520    case Primitive::kPrimDouble: {
1521      locations->SetInAt(0, Location::RequiresFpuRegister());
1522      locations->SetInAt(1, Location::RequiresFpuRegister());
1523      locations->SetOut(Location::RequiresRegister());
1524      break;
1525    }
1526    default:
1527      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1528  }
1529}
1530
1531void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
1532  Primitive::Type in_type = compare->InputAt(0)->GetType();
1533
1534  //  0 if: left == right
1535  //  1 if: left  > right
1536  // -1 if: left  < right
1537  switch (in_type) {
1538    case Primitive::kPrimLong: {
1539      Register result = OutputRegister(compare);
1540      Register left = InputRegisterAt(compare, 0);
1541      Operand right = InputOperandAt(compare, 1);
1542
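      // cmp; cset ne; cneg lt: 0 when equal, 1 when left > right, -1 when
      // left < right.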
1543      __ Cmp(left, right);
1544      __ Cset(result, ne);
1545      __ Cneg(result, result, lt);
1546      break;
1547    }
1548    case Primitive::kPrimFloat:
1549    case Primitive::kPrimDouble: {
1550      Register result = OutputRegister(compare);
1551      FPRegister left = InputFPRegisterAt(compare, 0);
1552      FPRegister right = InputFPRegisterAt(compare, 1);
1553
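      // IsGtBias() selects fcmpg semantics, where an unordered (NaN) comparison
      // yields +1; otherwise fcmpl yields -1.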
1554      __ Fcmp(left, right);
1555      if (compare->IsGtBias()) {
1556        __ Cset(result, ne);
1557      } else {
1558        __ Csetm(result, ne);
1559      }
1560      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
1561      break;
1562    }
1563    default:
1564      LOG(FATAL) << "Unimplemented compare type " << in_type;
1565  }
1566}
1567
1568void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
1569  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1570  locations->SetInAt(0, Location::RequiresRegister());
1571  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1572  if (instruction->NeedsMaterialization()) {
1573    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1574  }
1575}
1576
1577void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
1578  if (!instruction->NeedsMaterialization()) {
1579    return;
1580  }
1581
1582  LocationSummary* locations = instruction->GetLocations();
1583  Register lhs = InputRegisterAt(instruction, 0);
1584  Operand rhs = InputOperandAt(instruction, 1);
1585  Register res = RegisterFrom(locations->Out(), instruction->GetType());
1586  Condition cond = ARM64Condition(instruction->GetCondition());
1587
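  // Materialize the condition as a 0/1 value: cmp lhs, rhs; cset res, cond.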
1588  __ Cmp(lhs, rhs);
1589  __ Cset(res, cond);
1590}
1591
1592#define FOR_EACH_CONDITION_INSTRUCTION(M)                                                \
1593  M(Equal)                                                                               \
1594  M(NotEqual)                                                                            \
1595  M(LessThan)                                                                            \
1596  M(LessThanOrEqual)                                                                     \
1597  M(GreaterThan)                                                                         \
1598  M(GreaterThanOrEqual)
1599#define DEFINE_CONDITION_VISITORS(Name)                                                  \
1600void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
1601void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
1602FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
1603#undef DEFINE_CONDITION_VISITORS
1604#undef FOR_EACH_CONDITION_INSTRUCTION
1605
1606void LocationsBuilderARM64::VisitDiv(HDiv* div) {
1607  LocationSummary* locations =
1608      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1609  switch (div->GetResultType()) {
1610    case Primitive::kPrimInt:
1611    case Primitive::kPrimLong:
1612      locations->SetInAt(0, Location::RequiresRegister());
1613      locations->SetInAt(1, Location::RequiresRegister());
1614      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1615      break;
1616
1617    case Primitive::kPrimFloat:
1618    case Primitive::kPrimDouble:
1619      locations->SetInAt(0, Location::RequiresFpuRegister());
1620      locations->SetInAt(1, Location::RequiresFpuRegister());
1621      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1622      break;
1623
1624    default:
1625      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1626  }
1627}
1628
1629void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
1630  Primitive::Type type = div->GetResultType();
1631  switch (type) {
1632    case Primitive::kPrimInt:
1633    case Primitive::kPrimLong:
1634      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
1635      break;
1636
1637    case Primitive::kPrimFloat:
1638    case Primitive::kPrimDouble:
1639      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
1640      break;
1641
1642    default:
1643      LOG(FATAL) << "Unexpected div type " << type;
1644  }
1645}
1646
1647void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1648  LocationSummary* locations =
1649      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1650  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1651  if (instruction->HasUses()) {
1652    locations->SetOut(Location::SameAsFirstInput());
1653  }
1654}
1655
1656void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1657  SlowPathCodeARM64* slow_path =
1658      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
1659  codegen_->AddSlowPath(slow_path);
1660  Location value = instruction->GetLocations()->InAt(0);
1661
1662  Primitive::Type type = instruction->GetType();
1663
1664  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
1665    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1666    return;
1667  }
1668
1669  if (value.IsConstant()) {
1670    int64_t divisor = Int64ConstantFrom(value);
1671    if (divisor == 0) {
1672      __ B(slow_path->GetEntryLabel());
1673    } else {
1674      // A division by a non-zero constant is valid. We don't need to perform
1675      // any check, so simply fall through.
1676    }
1677  } else {
1678    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
1679  }
1680}
1681
1682void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
1683  LocationSummary* locations =
1684      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1685  locations->SetOut(Location::ConstantLocation(constant));
1686}
1687
1688void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
1689  UNUSED(constant);
1690  // Will be generated at use site.
1691}
1692
1693void LocationsBuilderARM64::VisitExit(HExit* exit) {
1694  exit->SetLocations(nullptr);
1695}
1696
1697void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
1698  UNUSED(exit);
1699  if (kIsDebugBuild) {
1700    down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
1701    __ Brk(__LINE__);    // TODO: Introduce special markers for such code locations.
1702  }
1703}
1704
1705void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
1706  LocationSummary* locations =
1707      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1708  locations->SetOut(Location::ConstantLocation(constant));
1709}
1710
1711void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
1712  UNUSED(constant);
1713  // Will be generated at use site.
1714}
1715
1716void LocationsBuilderARM64::VisitGoto(HGoto* got) {
1717  got->SetLocations(nullptr);
1718}
1719
1720void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
1721  HBasicBlock* successor = got->GetSuccessor();
1722  DCHECK(!successor->IsExitBlock());
1723  HBasicBlock* block = got->GetBlock();
1724  HInstruction* previous = got->GetPrevious();
1725  HLoopInformation* info = block->GetLoopInformation();
1726
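  // A goto on a loop back edge carries the loop's suspend check; emit the check
  // here (it branches to the successor itself) instead of a plain branch.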
1727  if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) {
1728    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1729    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1730    return;
1731  }
1732  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1733    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1734  }
1735  if (!codegen_->GoesToNextBlock(block, successor)) {
1736    __ B(codegen_->GetLabelOf(successor));
1737  }
1738}
1739
1740void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
1741  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
1742  HInstruction* cond = if_instr->InputAt(0);
1743  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1744    locations->SetInAt(0, Location::RequiresRegister());
1745  }
1746}
1747
1748void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
1749  HInstruction* cond = if_instr->InputAt(0);
1750  HCondition* condition = cond->AsCondition();
1751  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1752  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1753
1754  if (cond->IsIntConstant()) {
1755    int32_t cond_value = cond->AsIntConstant()->GetValue();
1756    if (cond_value == 1) {
1757      if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
1758        __ B(true_target);
1759      }
1760      return;
1761    } else {
1762      DCHECK_EQ(cond_value, 0);
1763    }
1764  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
1765    // The condition instruction has been materialized, compare the output to 0.
1766    Location cond_val = if_instr->GetLocations()->InAt(0);
1767    DCHECK(cond_val.IsRegister());
1768    __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
1769  } else {
1770    // The condition instruction has not been materialized, use its inputs as
1771    // the comparison and its condition as the branch condition.
1772    Register lhs = InputRegisterAt(condition, 0);
1773    Operand rhs = InputOperandAt(condition, 1);
1774    Condition arm64_cond = ARM64Condition(condition->GetCondition());
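    // A comparison against an immediate zero folds into a single cbz/cbnz.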
1775    if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
1776      if (arm64_cond == eq) {
1777        __ Cbz(lhs, true_target);
1778      } else {
1779        __ Cbnz(lhs, true_target);
1780      }
1781    } else {
1782      __ Cmp(lhs, rhs);
1783      __ B(arm64_cond, true_target);
1784    }
1785  }
1786  if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
1787    __ B(false_target);
1788  }
1789}
1790
1791void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
1792  LocationSummary* locations =
1793      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1794  locations->SetInAt(0, Location::RequiresRegister());
1795  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1796}
1797
1798void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
1799  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
1800
1801  if (instruction->IsVolatile()) {
1802    if (kUseAcquireRelease) {
1803      // NB: LoadAcquire will record the pc info if needed.
1804      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
1805    } else {
1806      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
1807      codegen_->MaybeRecordImplicitNullCheck(instruction);
1808      // For IRIW sequential consistency kLoadAny is not sufficient.
1809      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1810    }
1811  } else {
1812    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
1813    codegen_->MaybeRecordImplicitNullCheck(instruction);
1814  }
1815}
1816
1817void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
1818  LocationSummary* locations =
1819      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1820  locations->SetInAt(0, Location::RequiresRegister());
1821  locations->SetInAt(1, Location::RequiresRegister());
1822}
1823
1824void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
1825  Register obj = InputRegisterAt(instruction, 0);
1826  CPURegister value = InputCPURegisterAt(instruction, 1);
1827  Offset offset = instruction->GetFieldOffset();
1828  Primitive::Type field_type = instruction->GetFieldType();
1829
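  // Volatile stores use a store-release (stlr) under acquire/release; otherwise
  // bracket a plain store with kAnyStore and kAnyAny barriers.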
1830  if (instruction->IsVolatile()) {
1831    if (kUseAcquireRelease) {
1832      codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
1833      codegen_->MaybeRecordImplicitNullCheck(instruction);
1834    } else {
1835      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
1836      codegen_->Store(field_type, value, HeapOperand(obj, offset));
1837      codegen_->MaybeRecordImplicitNullCheck(instruction);
1838      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1839    }
1840  } else {
1841    codegen_->Store(field_type, value, HeapOperand(obj, offset));
1842    codegen_->MaybeRecordImplicitNullCheck(instruction);
1843  }
1844
1845  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
1846    codegen_->MarkGCCard(obj, Register(value));
1847  }
1848}
1849
1850void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
1851  LocationSummary::CallKind call_kind =
1852      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
1853  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
1854  locations->SetInAt(0, Location::RequiresRegister());
1855  locations->SetInAt(1, Location::RequiresRegister());
1856  // The output overlaps the inputs.
1857  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1858}
1859
1860void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
1861  LocationSummary* locations = instruction->GetLocations();
1862  Register obj = InputRegisterAt(instruction, 0);
1863  Register cls = InputRegisterAt(instruction, 1);
1864  Register out = OutputRegister(instruction);
1865
1866  vixl::Label done;
1867
1868  // Return 0 if `obj` is null.
1869  // TODO: Avoid this check if we know `obj` is not null.
1870  __ Mov(out, 0);
1871  __ Cbz(obj, &done);
1872
1873  // Compare the class of `obj` with `cls`.
1874  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
1875  __ Cmp(out, cls);
1876  if (instruction->IsClassFinal()) {
1877    // Classes must be equal for the instanceof to succeed.
1878    __ Cset(out, eq);
1879  } else {
1880    // If the classes are not equal, we go into a slow path.
1881    DCHECK(locations->OnlyCallsOnSlowPath());
1882    SlowPathCodeARM64* slow_path =
1883        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
1884        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
1885    codegen_->AddSlowPath(slow_path);
1886    __ B(ne, slow_path->GetEntryLabel());
1887    __ Mov(out, 1);
1888    __ Bind(slow_path->GetExitLabel());
1889  }
1890
1891  __ Bind(&done);
1892}
1893
1894void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
1895  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
1896  locations->SetOut(Location::ConstantLocation(constant));
1897}
1898
1899void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
1900  // Will be generated at use site.
1901  UNUSED(constant);
1902}
1903
1904void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
1905  LocationSummary* locations =
1906      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
1907  locations->AddTemp(LocationFrom(x0));
1908
1909  InvokeDexCallingConventionVisitor calling_convention_visitor;
1910  for (size_t i = 0; i < invoke->InputCount(); i++) {
1911    HInstruction* input = invoke->InputAt(i);
1912    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
1913  }
1914
1915  Primitive::Type return_type = invoke->GetType();
1916  if (return_type != Primitive::kPrimVoid) {
1917    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
1918  }
1919}
1920
1921void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
1922  HandleInvoke(invoke);
1923}
1924
1925void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
1926  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
1927  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
1928  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
1929          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
1930  Location receiver = invoke->GetLocations()->InAt(0);
1931  Offset class_offset = mirror::Object::ClassOffset();
1932  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
1933
1934  // The register ip1 is required to be used for the hidden argument in
1935  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
1936  UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
1937  scratch_scope.Exclude(ip1);
1938  __ Mov(ip1, invoke->GetDexMethodIndex());
1939
1940  // temp = object->GetClass();
1941  if (receiver.IsStackSlot()) {
1942    __ Ldr(temp, StackOperandFrom(receiver));
1943    __ Ldr(temp, HeapOperand(temp, class_offset));
1944  } else {
1945    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
1946  }
1947  codegen_->MaybeRecordImplicitNullCheck(invoke);
1948  // temp = temp->GetImtEntryAt(method_offset);
1949  __ Ldr(temp, HeapOperand(temp, method_offset));
1950  // lr = temp->GetEntryPoint();
1951  __ Ldr(lr, HeapOperand(temp, entry_point));
1952  // lr();
1953  __ Blr(lr);
1954  DCHECK(!codegen_->IsLeafMethod());
1955  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1956}
1957
1958void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1959  HandleInvoke(invoke);
1960}
1961
1962void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1963  HandleInvoke(invoke);
1964}
1965
1966void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1967  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
1968  // Make sure that ArtMethod* is passed in W0 as per the calling convention.
1969  DCHECK(temp.Is(w0));
1970  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
1971    invoke->GetDexMethodIndex() * kHeapRefSize;
1972
1973  // TODO: Implement all kinds of calls:
1974  // 1) boot -> boot
1975  // 2) app -> boot
1976  // 3) app -> app
1977  //
1978  // Currently we implement the app -> app logic, which looks up in the resolve cache.
1979
1980  // temp = method;
1981  codegen_->LoadCurrentMethod(temp);
1982  // temp = temp->dex_cache_resolved_methods_;
1983  __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
1984  // temp = temp[index_in_cache];
1985  __ Ldr(temp, HeapOperand(temp, index_in_cache));
1986  // lr = temp->entry_point_from_quick_compiled_code_;
1987  __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
1988                          kArm64WordSize)));
1989  // lr();
1990  __ Blr(lr);
1991
1992  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1993  DCHECK(!codegen_->IsLeafMethod());
1994}
1995
1996void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1997  LocationSummary* locations = invoke->GetLocations();
1998  Location receiver = locations->InAt(0);
1999  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
2000  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
2001    invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
2002  Offset class_offset = mirror::Object::ClassOffset();
2003  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2004
2005  // temp = object->GetClass();
2006  if (receiver.IsStackSlot()) {
2007    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
2008    __ Ldr(temp, HeapOperand(temp, class_offset));
2009  } else {
2010    DCHECK(receiver.IsRegister());
2011    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
2012  }
2013  codegen_->MaybeRecordImplicitNullCheck(invoke);
2014  // temp = temp->GetMethodAt(method_offset);
2015  __ Ldr(temp, HeapOperand(temp, method_offset));
2016  // lr = temp->GetEntryPoint();
2017  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
2018  // lr();
2019  __ Blr(lr);
2020  DCHECK(!codegen_->IsLeafMethod());
2021  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2022}
2023
2024void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
2025  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2026                                                              : LocationSummary::kNoCall;
2027  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2028  locations->SetOut(Location::RequiresRegister());
2029}
2030
2031void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
2032  Register out = OutputRegister(cls);
2033  if (cls->IsReferrersClass()) {
2034    DCHECK(!cls->CanCallRuntime());
2035    DCHECK(!cls->MustGenerateClinitCheck());
2036    codegen_->LoadCurrentMethod(out);
2037    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
2038  } else {
2039    DCHECK(cls->CanCallRuntime());
2040    codegen_->LoadCurrentMethod(out);
2041    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
2042    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
2043
2044    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
2045        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
2046    codegen_->AddSlowPath(slow_path);
2047    __ Cbz(out, slow_path->GetEntryLabel());
2048    if (cls->MustGenerateClinitCheck()) {
2049      GenerateClassInitializationCheck(slow_path, out);
2050    } else {
2051      __ Bind(slow_path->GetExitLabel());
2052    }
2053  }
2054}
2055
2056void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
2057  LocationSummary* locations =
2058      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2059  locations->SetOut(Location::RequiresRegister());
2060}
2061
2062void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
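  // Load the pending exception from the current Thread and clear the slot.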
2063  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
2064  __ Ldr(OutputRegister(instruction), exception);
2065  __ Str(wzr, exception);
2066}
2067
2068void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
2069  load->SetLocations(nullptr);
2070}
2071
2072void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
2073  // Nothing to do, this is driven by the code generator.
2074  UNUSED(load);
2075}
2076
2077void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
2078  LocationSummary* locations =
2079      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2080  locations->SetOut(Location::RequiresRegister());
2081}
2082
2083void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
2084  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
2085  codegen_->AddSlowPath(slow_path);
2086
2087  Register out = OutputRegister(load);
2088  codegen_->LoadCurrentMethod(out);
2089  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
2090  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
2091  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
2092  __ Cbz(out, slow_path->GetEntryLabel());
2093  __ Bind(slow_path->GetExitLabel());
2094}
2095
2096void LocationsBuilderARM64::VisitLocal(HLocal* local) {
2097  local->SetLocations(nullptr);
2098}
2099
2100void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
2101  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2102}
2103
2104void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
2105  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2106  locations->SetOut(Location::ConstantLocation(constant));
2107}
2108
2109void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
2110  // Will be generated at use site.
2111  UNUSED(constant);
2112}
2113
2114void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2115  LocationSummary* locations =
2116      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2117  InvokeRuntimeCallingConvention calling_convention;
2118  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2119}
2120
2121void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2122  codegen_->InvokeRuntime(instruction->IsEnter()
2123        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
2124      instruction,
2125      instruction->GetDexPc());
2126  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2127}
2128
2129void LocationsBuilderARM64::VisitMul(HMul* mul) {
2130  LocationSummary* locations =
2131      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2132  switch (mul->GetResultType()) {
2133    case Primitive::kPrimInt:
2134    case Primitive::kPrimLong:
2135      locations->SetInAt(0, Location::RequiresRegister());
2136      locations->SetInAt(1, Location::RequiresRegister());
2137      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2138      break;
2139
2140    case Primitive::kPrimFloat:
2141    case Primitive::kPrimDouble:
2142      locations->SetInAt(0, Location::RequiresFpuRegister());
2143      locations->SetInAt(1, Location::RequiresFpuRegister());
2144      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2145      break;
2146
2147    default:
2148      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2149  }
2150}
2151
2152void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
2153  switch (mul->GetResultType()) {
2154    case Primitive::kPrimInt:
2155    case Primitive::kPrimLong:
2156      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
2157      break;
2158
2159    case Primitive::kPrimFloat:
2160    case Primitive::kPrimDouble:
2161      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
2162      break;
2163
2164    default:
2165      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2166  }
2167}
2168
2169void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
2170  LocationSummary* locations =
2171      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2172  switch (neg->GetResultType()) {
2173    case Primitive::kPrimInt:
2174    case Primitive::kPrimLong:
2175      locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
2176      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2177      break;
2178
2179    case Primitive::kPrimFloat:
2180    case Primitive::kPrimDouble:
2181      locations->SetInAt(0, Location::RequiresFpuRegister());
2182      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2183      break;
2184
2185    default:
2186      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2187  }
2188}
2189
2190void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
2191  switch (neg->GetResultType()) {
2192    case Primitive::kPrimInt:
2193    case Primitive::kPrimLong:
2194      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
2195      break;
2196
2197    case Primitive::kPrimFloat:
2198    case Primitive::kPrimDouble:
2199      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
2200      break;
2201
2202    default:
2203      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2204  }
2205}
2206
2207void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
2208  LocationSummary* locations =
2209      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2210  InvokeRuntimeCallingConvention calling_convention;
2211  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2212  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
2213  locations->SetOut(LocationFrom(x0));
2214  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2215  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2216                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
2217}
2218
2219void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
2220  LocationSummary* locations = instruction->GetLocations();
2221  InvokeRuntimeCallingConvention calling_convention;
2222  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2223  DCHECK(type_index.Is(w0));
2224  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
2225  DCHECK(current_method.Is(w2));
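  // The entrypoint expects the type index in w0, the component count in w1
  // (already placed there by the locations) and the calling ArtMethod* in w2.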
2226  codegen_->LoadCurrentMethod(current_method);
2227  __ Mov(type_index, instruction->GetTypeIndex());
2228  codegen_->InvokeRuntime(
2229      QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
2230  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2231                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
2232}
2233
2234void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
2235  LocationSummary* locations =
2236      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2237  InvokeRuntimeCallingConvention calling_convention;
2238  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2239  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
2240  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2241  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
2242}
2243
2244void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
2245  LocationSummary* locations = instruction->GetLocations();
2246  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2247  DCHECK(type_index.Is(w0));
2248  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
2249  DCHECK(current_method.Is(w1));
2250  codegen_->LoadCurrentMethod(current_method);
2251  __ Mov(type_index, instruction->GetTypeIndex());
2252  codegen_->InvokeRuntime(
2253      QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
2254  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
2255}
2256
2257void LocationsBuilderARM64::VisitNot(HNot* instruction) {
2258  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2259  locations->SetInAt(0, Location::RequiresRegister());
2260  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2261}
2262
2263void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
2264  switch (instruction->InputAt(0)->GetType()) {
2265    case Primitive::kPrimInt:
2266    case Primitive::kPrimLong:
2267      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
2268      break;
2269
2270    default:
2271      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2272  }
2273}
2274
2275void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
2276  LocationSummary* locations =
2277      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2278  locations->SetInAt(0, Location::RequiresRegister());
2279  if (instruction->HasUses()) {
2280    locations->SetOut(Location::SameAsFirstInput());
2281  }
2282}
2283
2284void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2285  if (codegen_->CanMoveNullCheckToUser(instruction)) {
2286    return;
2287  }
2288  Location obj = instruction->GetLocations()->InAt(0);
2289
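  // Loading through a null object faults; the fault handler turns the SIGSEGV
  // into a NullPointerException, so no explicit branch is needed here.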
2290  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
2291  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2292}
2293
2294void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2295  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
2296  codegen_->AddSlowPath(slow_path);
2297
2298  LocationSummary* locations = instruction->GetLocations();
2299  Location obj = locations->InAt(0);
2300
2301  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
2302}
2303
2304void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
2305  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2306    GenerateImplicitNullCheck(instruction);
2307  } else {
2308    GenerateExplicitNullCheck(instruction);
2309  }
2310}
2311
2312void LocationsBuilderARM64::VisitOr(HOr* instruction) {
2313  HandleBinaryOp(instruction);
2314}
2315
2316void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
2317  HandleBinaryOp(instruction);
2318}
2319
2320void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2321  LOG(FATAL) << "Unreachable";
2322}
2323
2324void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
2325  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2326}
2327
2328void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
2329  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2330  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2331  if (location.IsStackSlot()) {
2332    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2333  } else if (location.IsDoubleStackSlot()) {
2334    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2335  }
2336  locations->SetOut(location);
2337}
2338
2339void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
2340  // Nothing to do, the parameter is already at its location.
2341  UNUSED(instruction);
2342}
2343
2344void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
2345  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2346  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2347    locations->SetInAt(i, Location::Any());
2348  }
2349  locations->SetOut(Location::Any());
2350}
2351
2352void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
2353  UNUSED(instruction);
2354  LOG(FATAL) << "Unreachable";
2355}
2356
2357void LocationsBuilderARM64::VisitRem(HRem* rem) {
2358  Primitive::Type type = rem->GetResultType();
2359  LocationSummary::CallKind call_kind = IsFPType(type) ? LocationSummary::kCall
2360                                                       : LocationSummary::kNoCall;
2361  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2362
2363  switch (type) {
2364    case Primitive::kPrimInt:
2365    case Primitive::kPrimLong:
2366      locations->SetInAt(0, Location::RequiresRegister());
2367      locations->SetInAt(1, Location::RequiresRegister());
2368      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2369      break;
2370
2371    case Primitive::kPrimFloat:
2372    case Primitive::kPrimDouble: {
2373      InvokeRuntimeCallingConvention calling_convention;
2374      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
2375      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
2376      locations->SetOut(calling_convention.GetReturnLocation(type));
2377
2378      break;
2379    }
2380
2381    default:
2382      LOG(FATAL) << "Unexpected rem type " << type;
2383  }
2384}
2385
2386void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
2387  Primitive::Type type = rem->GetResultType();
2388
2389  switch (type) {
2390    case Primitive::kPrimInt:
2391    case Primitive::kPrimLong: {
2392      UseScratchRegisterScope temps(GetVIXLAssembler());
2393      Register dividend = InputRegisterAt(rem, 0);
2394      Register divisor = InputRegisterAt(rem, 1);
2395      Register output = OutputRegister(rem);
2396      Register temp = temps.AcquireSameSizeAs(output);
2397
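      // ARM64 has no integer remainder instruction: temp = dividend / divisor,
      // then output = dividend - temp * divisor via msub.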
2398      __ Sdiv(temp, dividend, divisor);
2399      __ Msub(output, temp, divisor, dividend);
2400      break;
2401    }
2402
2403    case Primitive::kPrimFloat:
2404    case Primitive::kPrimDouble: {
2405      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
2406                                                             : QUICK_ENTRY_POINT(pFmod);
2407      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc());
2408      break;
2409    }
2410
2411    default:
2412      LOG(FATAL) << "Unexpected rem type " << type;
2413  }
2414}
2415
2416void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
2417  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2418  Primitive::Type return_type = instruction->InputAt(0)->GetType();
2419  locations->SetInAt(0, ARM64ReturnLocation(return_type));
2420}
2421
2422void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
2423  UNUSED(instruction);
2424  codegen_->GenerateFrameExit();
2425  __ Ret();
2426}
2427
2428void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
2429  instruction->SetLocations(nullptr);
2430}
2431
2432void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
2433  UNUSED(instruction);
2434  codegen_->GenerateFrameExit();
2435  __ Ret();
2436}
2437
2438void LocationsBuilderARM64::VisitShl(HShl* shl) {
2439  HandleShift(shl);
2440}
2441
2442void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
2443  HandleShift(shl);
2444}
2445
2446void LocationsBuilderARM64::VisitShr(HShr* shr) {
2447  HandleShift(shr);
2448}
2449
2450void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
2451  HandleShift(shr);
2452}
2453
2454void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
2455  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
2456  Primitive::Type field_type = store->InputAt(1)->GetType();
2457  switch (field_type) {
2458    case Primitive::kPrimNot:
2459    case Primitive::kPrimBoolean:
2460    case Primitive::kPrimByte:
2461    case Primitive::kPrimChar:
2462    case Primitive::kPrimShort:
2463    case Primitive::kPrimInt:
2464    case Primitive::kPrimFloat:
2465      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
2466      break;
2467
2468    case Primitive::kPrimLong:
2469    case Primitive::kPrimDouble:
2470      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
2471      break;
2472
2473    default:
2474      LOG(FATAL) << "Unimplemented local type " << field_type;
2475  }
2476}
2477
2478void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
2479  UNUSED(store);
2480}
2481
2482void LocationsBuilderARM64::VisitSub(HSub* instruction) {
2483  HandleBinaryOp(instruction);
2484}
2485
2486void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
2487  HandleBinaryOp(instruction);
2488}
2489
2490void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
2491  LocationSummary* locations =
2492      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2493  locations->SetInAt(0, Location::RequiresRegister());
2494  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2495}
2496
2497void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
2498  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), instruction->GetFieldOffset());
2499
2500  if (instruction->IsVolatile()) {
2501    if (kUseAcquireRelease) {
2502      // NB: LoadAcquire will record the pc info if needed.
2503      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
2504    } else {
2505      codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
2506      // For IRIW sequential consistency kLoadAny is not sufficient.
2507      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
2508    }
2509  } else {
2510    codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
2511  }
2512}
2513
2514void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
2515  LocationSummary* locations =
2516      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2517  locations->SetInAt(0, Location::RequiresRegister());
2518  locations->SetInAt(1, Location::RequiresRegister());
2519}
2520
2521void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
2522  Register cls = InputRegisterAt(instruction, 0);
2523  CPURegister value = InputCPURegisterAt(instruction, 1);
2524  Offset offset = instruction->GetFieldOffset();
2525  Primitive::Type field_type = instruction->GetFieldType();
2526
2527  if (instruction->IsVolatile()) {
2528    if (kUseAcquireRelease) {
2529      codegen_->StoreRelease(field_type, value, HeapOperand(cls, offset));
2530    } else {
2531      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
2532      codegen_->Store(field_type, value, HeapOperand(cls, offset));
2533      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
2534    }
2535  } else {
2536    codegen_->Store(field_type, value, HeapOperand(cls, offset));
2537  }
2538
2539  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
2540    codegen_->MarkGCCard(cls, Register(value));
2541  }
2542}
2543
2544void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
2545  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
2546}
2547
2548void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
2549  HBasicBlock* block = instruction->GetBlock();
2550  if (block->GetLoopInformation() != nullptr) {
2551    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
2552    // The back edge will generate the suspend check.
2553    return;
2554  }
2555  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
2556    // The goto will generate the suspend check.
2557    return;
2558  }
2559  GenerateSuspendCheck(instruction, nullptr);
2560}
2561
2562void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
2563  temp->SetLocations(nullptr);
2564}
2565
2566void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
2567  // Nothing to do, this is driven by the code generator.
2568  UNUSED(temp);
2569}
2570
2571void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
2572  LocationSummary* locations =
2573      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2574  InvokeRuntimeCallingConvention calling_convention;
2575  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2576}
2577
2578void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
2579  codegen_->InvokeRuntime(
2580      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
2581  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
2582}
2583
2584void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
2585  LocationSummary* locations =
2586      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
2587  Primitive::Type input_type = conversion->GetInputType();
2588  Primitive::Type result_type = conversion->GetResultType();
2589  DCHECK_NE(input_type, result_type);
2590  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
2591      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
2592    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
2593  }
2594
2595  if (IsFPType(input_type)) {
2596    locations->SetInAt(0, Location::RequiresFpuRegister());
2597  } else {
2598    locations->SetInAt(0, Location::RequiresRegister());
2599  }
2600
2601  if (IsFPType(result_type)) {
2602    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2603  } else {
2604    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2605  }
2606}
2607
2608void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
2609  Primitive::Type result_type = conversion->GetResultType();
2610  Primitive::Type input_type = conversion->GetInputType();
2611
2612  DCHECK_NE(input_type, result_type);
2613
2614  if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
2615    int result_size = Primitive::ComponentSize(result_type);
2616    int input_size = Primitive::ComponentSize(input_type);
2617    int min_size = std::min(result_size, input_size);
2618    Register output = OutputRegister(conversion);
2619    Register source = InputRegisterAt(conversion, 0);
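    // Conversions to char, and widenings from char, zero-extend (ubfx);
    // every other integral conversion sign-extends (sbfx).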
2620    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
2621      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
2622    } else if ((result_type == Primitive::kPrimChar) ||
2623               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
2624      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
2625    } else {
2626      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
2627    }
2628  } else if (IsFPType(result_type) && IsIntegralType(input_type)) {
2629    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
2630  } else if (IsIntegralType(result_type) && IsFPType(input_type)) {
2631    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
2632    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
2633  } else if (IsFPType(result_type) && IsFPType(input_type)) {
2634    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
2635  } else {
2636    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
2637                << " to " << result_type;
2638  }
2639}
2640
2641void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
2642  HandleShift(ushr);
2643}
2644
2645void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
2646  HandleShift(ushr);
2647}
2648
2649void LocationsBuilderARM64::VisitXor(HXor* instruction) {
2650  HandleBinaryOp(instruction);
2651}
2652
2653void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
2654  HandleBinaryOp(instruction);
2655}
2656
2657#undef __
2658#undef QUICK_ENTRY_POINT
2659
2660}  // namespace arm64
2661}  // namespace art
2662