code_generator_arm64.cc revision 0379f82393237798616d485ad99952e73e480e12
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

using namespace vixl;   // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;

static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
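// In the slow paths below, `__` emits through the enclosing code generator's VIXL
// macro-assembler, and QUICK_ENTRY_POINT(x) yields the Thread-relative offset of the
// quick runtime entrypoint `x`, suitable for InvokeRuntime().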

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
                                     HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};

#undef __

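// Managed (dex) calling convention on ARM64: FP arguments are assigned to FP argument registers
// and all other arguments to core argument registers until those run out; the remainder go to
// stack slots. A 64-bit value consumes two argument slots, and stack space is reserved for every
// argument, register-allocated or not.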
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
  return next_location;
}

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                                       const Arm64InstructionSetFeatures& isa_features,
                                       const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    callee_saved_core_registers.list(),
                    callee_saved_fp_registers.list(),
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(LocationFrom(lr));
}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
  // Note: There are 6 kinds of moves:
  // 1. constant -> GPR/FPR (non-cycle)
  // 2. constant -> stack (non-cycle)
  // 3. GPR/FPR -> GPR/FPR
  // 4. GPR/FPR -> stack
  // 5. stack -> GPR/FPR
  // 6. stack -> stack (non-cycle)
  // Cases 1, 2 and 6 should never be included in a dependency cycle on ARM64. For cases 3, 4
  // and 5, VIXL uses at most one GPR. VIXL has two GPR and one FPR temps, and there should be
  // no intersecting cycles on ARM64, so we always have one GPR and one FPR VIXL temp available
  // to resolve the dependency.
  vixl_temps_.Open(GetVIXLAssembler());
}

void ParallelMoveResolverARM64::FinishEmitNativeCode() {
  vixl_temps_.Close();
}

Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
  DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
         kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
  kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
  Location scratch = GetScratchLocation(kind);
  if (!scratch.Equals(Location::NoLocation())) {
    return scratch;
  }
  // Allocate from VIXL temp registers.
  if (kind == Location::kRegister) {
    scratch = LocationFrom(vixl_temps_.AcquireX());
  } else {
    DCHECK(kind == Location::kFpuRegister);
    scratch = LocationFrom(vixl_temps_.AcquireD());
  }
  AddScratchLocation(scratch);
  return scratch;
}

void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
  if (loc.IsRegister()) {
    vixl_temps_.Release(XRegisterFrom(loc));
  } else {
    DCHECK(loc.IsFpuRegister());
    vixl_temps_.Release(DRegisterFrom(loc));
  }
  RemoveScratchLocation(loc);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  __ Bind(&frame_entry_label_);

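  // The overflow check probes `sp - reserved_bytes` with a plain load: if the stack is about to
  // overflow, the load touches the protected guard region and faults, and the runtime's implicit
  // stack-overflow fault handler raises StackOverflowError (hence the DCHECK below that implicit
  // checks are enabled). RecordPcInfo maps the probe's pc for that handler.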
  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    // Stack layout:
    //      sp[frame_size - 8]        : lr.
    //      ...                       : other preserved core registers.
    //      ...                       : other preserved fp registers.
    //      ...                       : reserved frame space.
    //      sp[0]                     : current method.
    __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
    GetAssembler()->cfi().AdjustCFAOffset(frame_size);
    GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
  }
}

void CodeGeneratorARM64::GenerateFrameExit() {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  GetAssembler()->cfi().RememberState();
  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
    GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    __ Drop(frame_size);
    GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
  }
  __ Ret();
  GetAssembler()->cfi().RestoreState();
  GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  }

  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsIntConstant()
      || instruction->IsLongConstant()
      || instruction->IsNullConstant()) {
    int64_t value = GetInt64ValueOf(instruction->AsConstant());
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
          ? temps.AcquireW()
          : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

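// Write-barrier support: mark the card covering `object` after a reference store into it. The
// card table base is loaded from the Thread, `object >> kCardShift` indexes the card, and the
// byte stored is the low byte of the (biased) card table base, which the runtime arranges to be
// the dirty-card value. Null stores are skipped.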
void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();   // Index within the CardTable - 32bit.
  vixl::Label done;
  __ Cbz(value, &done);
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  __ Bind(&done);
}

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
  // Blocked core registers:
  //      lr        : Runtime reserved.
  //      tr        : Runtime reserved.
  //      xSuspend  : Runtime reserved. TODO: Unblock this when the runtime stops using it.
  //      ip1       : VIXL core temp.
  //      ip0       : VIXL core temp.
  //
  // Blocked fp registers:
  //      d31       : VIXL fp temp.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }

  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }

  if (is_baseline) {
    CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
    while (!reserved_core_baseline_registers.IsEmpty()) {
      blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
    }

    CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
    while (!reserved_fp_baseline_registers.IsEmpty()) {
      blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
    }
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant()) {
    __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
  } else if (constant->IsLongConstant()) {
    __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
  } else if (constant->IsNullConstant()) {
    __ Mov(Register(destination), 0);
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}

static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         // Null is mapped to a core W register, which we associate with kPrimInt.
         (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        DCHECK(destination.IsFpuRegister());
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  UseScratchRegisterScope temps(masm);
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));

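      // There is no load-acquire form for FP registers: Ldar into a core temp,
      // then Fmov the bits across.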
      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

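      // There is no store-release form for FP registers: Fmov the bits into a
      // core temp, then Stlr.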
      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(RequiresCurrentMethod());
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

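// Call a quick runtime entrypoint: the function pointer lives in a per-Thread entrypoint table
// at `tr + entry_point_offset` and is invoked via Blr, clobbering lr.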
void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  if (instruction != nullptr) {
    RecordPcInfo(instruction, dex_pc, slow_path);
    DCHECK(instruction->IsSuspendCheck()
        || instruction->IsBoundsCheck()
        || instruction->IsNullCheck()
        || instruction->IsDivZeroCheck()
        || !IsLeafMethod());
  }
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
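  // The status is compared as a signed value (`lt`), so erroneous statuses (which are
  // negative) also take the slow path.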
  if (use_acquire_release) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
    new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

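  // Load the 16-bit thread flags; any set flag (e.g. a suspend or checkpoint request) makes
  // the value non-zero and diverts execution to the slow path.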
  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
    UNUSED(instr);                                                                    \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
  }                                                                                   \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
                                                   const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  BlockPoolsScope block_pools(GetVIXLAssembler());

  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (field_info.IsVolatile()) {
    if (use_acquire_release) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field);
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
                                                   const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
  BlockPoolsScope block_pools(GetVIXLAssembler());

  Register obj = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  Offset offset = field_info.GetFieldOffset();
  Primitive::Type field_type = field_info.GetFieldType();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (field_info.IsVolatile()) {
    if (use_acquire_release) {
      codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    } else {
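      // Without acquire/release, bracket the volatile store with a kAnyStore barrier before it
      // and a kAnyAny barrier after it.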
      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
      codegen_->Store(field_type, value, HeapOperand(obj, offset));
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Store(field_type, value, HeapOperand(obj, offset));
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    codegen_->MarkGCCard(obj, Register(value));
  }
}

void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
      if (instr->IsAdd()) {
        __ Add(dst, lhs, rhs);
      } else if (instr->IsAnd()) {
        __ And(dst, lhs, rhs);
      } else if (instr->IsOr()) {
        __ Orr(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Sub(dst, lhs, rhs);
      } else {
        DCHECK(instr->IsXor());
        __ Eor(dst, lhs, rhs);
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FPRegister dst = OutputFPRegister(instr);
      FPRegister lhs = InputFPRegisterAt(instr, 0);
      FPRegister rhs = InputFPRegisterAt(instr, 1);
      if (instr->IsAdd()) {
        __ Fadd(dst, lhs, rhs);
      } else if (instr->IsSub()) {
        __ Fsub(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  Primitive::Type type = instr->GetType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      Register dst = OutputRegister(instr);
      Register lhs = InputRegisterAt(instr, 0);
      Operand rhs = InputOperandAt(instr, 1);
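      // Java implicitly masks shift distances (& 0x1f for int, & 0x3f for long), so mask
      // immediates with kMaxIntShiftValue/kMaxLongShiftValue. Register shift amounts need no
      // explicit mask: the AArch64 variable-shift instructions use only the low bits.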
1258      if (rhs.IsImmediate()) {
1259        uint32_t shift_value = (type == Primitive::kPrimInt)
1260          ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
1261          : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
1262        if (instr->IsShl()) {
1263          __ Lsl(dst, lhs, shift_value);
1264        } else if (instr->IsShr()) {
1265          __ Asr(dst, lhs, shift_value);
1266        } else {
1267          __ Lsr(dst, lhs, shift_value);
1268        }
1269      } else {
1270        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1271
1272        if (instr->IsShl()) {
1273          __ Lsl(dst, lhs, rhs_reg);
1274        } else if (instr->IsShr()) {
1275          __ Asr(dst, lhs, rhs_reg);
1276        } else {
1277          __ Lsr(dst, lhs, rhs_reg);
1278        }
1279      }
1280      break;
1281    }
1282    default:
1283      LOG(FATAL) << "Unexpected shift operation type " << type;
1284  }
1285}
1286
1287void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1288  HandleBinaryOp(instruction);
1289}
1290
1291void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1292  HandleBinaryOp(instruction);
1293}
1294
1295void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1296  HandleBinaryOp(instruction);
1297}
1298
1299void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1300  HandleBinaryOp(instruction);
1301}
1302
1303void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
1304  LocationSummary* locations =
1305      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1306  locations->SetInAt(0, Location::RequiresRegister());
1307  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1308  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1309    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1310  } else {
1311    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1312  }
1313}
1314
1315void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
1316  LocationSummary* locations = instruction->GetLocations();
1317  Primitive::Type type = instruction->GetType();
1318  Register obj = InputRegisterAt(instruction, 0);
1319  Location index = locations->InAt(1);
1320  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
1321  MemOperand source = HeapOperand(obj);
1322  MacroAssembler* masm = GetVIXLAssembler();
1323  UseScratchRegisterScope temps(masm);
1324  BlockPoolsScope block_pools(masm);
1325
1326  if (index.IsConstant()) {
1327    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
1328    source = HeapOperand(obj, offset);
1329  } else {
1330    Register temp = temps.AcquireSameSizeAs(obj);
1331    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
1332    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
1333    source = HeapOperand(temp, offset);
1334  }
1335
1336  codegen_->Load(type, OutputCPURegister(instruction), source);
1337  codegen_->MaybeRecordImplicitNullCheck(instruction);
1338}
1339
1340void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
1341  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1342  locations->SetInAt(0, Location::RequiresRegister());
1343  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1344}
1345
1346void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
1347  BlockPoolsScope block_pools(GetVIXLAssembler());
1348  __ Ldr(OutputRegister(instruction),
1349         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
1350  codegen_->MaybeRecordImplicitNullCheck(instruction);
1351}
1352
1353void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
1354  if (instruction->NeedsTypeCheck()) {
1355    LocationSummary* locations =
1356        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
1357    InvokeRuntimeCallingConvention calling_convention;
1358    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
1359    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
1360    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
1361  } else {
1362    LocationSummary* locations =
1363        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1364    locations->SetInAt(0, Location::RequiresRegister());
1365    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1366    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1367      locations->SetInAt(2, Location::RequiresFpuRegister());
1368    } else {
1369      locations->SetInAt(2, Location::RequiresRegister());
1370    }
1371  }
1372}
1373
1374void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
1375  Primitive::Type value_type = instruction->GetComponentType();
1376  LocationSummary* locations = instruction->GetLocations();
1377  bool needs_runtime_call = locations->WillCall();
1378
1379  if (needs_runtime_call) {
1380    codegen_->InvokeRuntime(
1381        QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
1382    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
1383  } else {
1384    Register obj = InputRegisterAt(instruction, 0);
1385    CPURegister value = InputCPURegisterAt(instruction, 2);
1386    Location index = locations->InAt(1);
1387    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
1388    MemOperand destination = HeapOperand(obj);
1389    MacroAssembler* masm = GetVIXLAssembler();
1390    BlockPoolsScope block_pools(masm);
1391    {
1392      // We use a block to end the scratch scope before the write barrier, thus
1393      // freeing the temporary registers so they can be used in `MarkGCCard`.
1394      UseScratchRegisterScope temps(masm);
1395
1396      if (index.IsConstant()) {
1397        offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1398        destination = HeapOperand(obj, offset);
1399      } else {
1400        Register temp = temps.AcquireSameSizeAs(obj);
1401        Register index_reg = InputRegisterAt(instruction, 1);
1402        __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
1403        destination = HeapOperand(temp, offset);
1404      }
1405
1406      codegen_->Store(value_type, value, destination);
1407      codegen_->MaybeRecordImplicitNullCheck(instruction);
1408    }
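    // For reference stores, mark the card covering `obj` so the garbage
    // collector knows to re-scan this object when processing dirty cards
    // (the usual generational write-barrier scheme).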
1409    if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
1410      codegen_->MarkGCCard(obj, value.W());
1411    }
1412  }
1413}
1414
1415void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1416  LocationSummary* locations =
1417      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1418  locations->SetInAt(0, Location::RequiresRegister());
1419  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1420  if (instruction->HasUses()) {
1421    locations->SetOut(Location::SameAsFirstInput());
1422  }
1423}
1424
1425void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1426  LocationSummary* locations = instruction->GetLocations();
1427  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
1428      instruction, locations->InAt(0), locations->InAt(1));
1429  codegen_->AddSlowPath(slow_path);
1430
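  // A single unsigned comparison covers both failure cases: a negative index
  // reinterpreted as unsigned is larger than any valid length, so `hs`
  // (unsigned >=) catches it along with indices beyond the array length.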
1431  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
1432  __ B(slow_path->GetEntryLabel(), hs);
1433}
1434
1435void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
1436  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1437      instruction, LocationSummary::kCallOnSlowPath);
1438  locations->SetInAt(0, Location::RequiresRegister());
1439  locations->SetInAt(1, Location::RequiresRegister());
1440  locations->AddTemp(Location::RequiresRegister());
1441}
1442
1443void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
1444  LocationSummary* locations = instruction->GetLocations();
1445  Register obj = InputRegisterAt(instruction, 0);
1446  Register cls = InputRegisterAt(instruction, 1);
1447  Register obj_cls = WRegisterFrom(locations->GetTemp(0));
1448
1449  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
1450      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
1451  codegen_->AddSlowPath(slow_path);
1452
1453  // Avoid null check if we know obj is not null.
1454  if (instruction->MustDoNullCheck()) {
1455    __ Cbz(obj, slow_path->GetExitLabel());
1456  }
1457  // Compare the class of `obj` with `cls`.
1458  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
1459  __ Cmp(obj_cls, cls);
1460  __ B(ne, slow_path->GetEntryLabel());
1461  __ Bind(slow_path->GetExitLabel());
1462}
1463
1464void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
1465  LocationSummary* locations =
1466      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1467  locations->SetInAt(0, Location::RequiresRegister());
1468  if (check->HasUses()) {
1469    locations->SetOut(Location::SameAsFirstInput());
1470  }
1471}
1472
1473void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
1474  // We assume the class is not null.
1475  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
1476      check->GetLoadClass(), check, check->GetDexPc(), true);
1477  codegen_->AddSlowPath(slow_path);
1478  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
1479}
1480
1481void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
1482  LocationSummary* locations =
1483      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
1484  Primitive::Type in_type = compare->InputAt(0)->GetType();
1485  switch (in_type) {
1486    case Primitive::kPrimLong: {
1487      locations->SetInAt(0, Location::RequiresRegister());
1488      locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
1489      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1490      break;
1491    }
1492    case Primitive::kPrimFloat:
1493    case Primitive::kPrimDouble: {
1494      locations->SetInAt(0, Location::RequiresFpuRegister());
1495      HInstruction* right = compare->InputAt(1);
1496      if ((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
1497          (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0))) {
1498        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1499      } else {
1500        locations->SetInAt(1, Location::RequiresFpuRegister());
1501      }
1502      locations->SetOut(Location::RequiresRegister());
1503      break;
1504    }
1505    default:
1506      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1507  }
1508}
1509
1510void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
1511  Primitive::Type in_type = compare->InputAt(0)->GetType();
1512
1513  //  0 if: left == right
1514  //  1 if: left  > right
1515  // -1 if: left  < right
1516  switch (in_type) {
1517    case Primitive::kPrimLong: {
1518      Register result = OutputRegister(compare);
1519      Register left = InputRegisterAt(compare, 0);
1520      Operand right = InputOperandAt(compare, 1);
1521
1522      __ Cmp(left, right);
1523      __ Cset(result, ne);
1524      __ Cneg(result, result, lt);
1525      break;
1526    }
1527    case Primitive::kPrimFloat:
1528    case Primitive::kPrimDouble: {
1529      Register result = OutputRegister(compare);
1530      FPRegister left = InputFPRegisterAt(compare, 0);
1531      if (compare->GetLocations()->InAt(1).IsConstant()) {
1532        if (kIsDebugBuild) {
1533          HInstruction* right = compare->GetLocations()->InAt(1).GetConstant();
1534          DCHECK((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
1535                  (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0)));
1536        }
1537        // 0.0 is the only immediate that can be encoded directly in a FCMP instruction.
1538        __ Fcmp(left, 0.0);
1539      } else {
1540        __ Fcmp(left, InputFPRegisterAt(compare, 1));
1541      }
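      // Materialize -1/0/1 while honoring the NaN bias: an unordered FCMP
      // sets NZCV to 0011, so with a gt bias `cset ne` + `cneg mi` leaves 1,
      // and with an lt bias `csetm ne` + `cneg gt` leaves -1.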
1542      if (compare->IsGtBias()) {
1543        __ Cset(result, ne);
1544      } else {
1545        __ Csetm(result, ne);
1546      }
1547      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
1548      break;
1549    }
1550    default:
1551      LOG(FATAL) << "Unimplemented compare type " << in_type;
1552  }
1553}
1554
1555void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
1556  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1557  locations->SetInAt(0, Location::RequiresRegister());
1558  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1559  if (instruction->NeedsMaterialization()) {
1560    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1561  }
1562}
1563
1564void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
1565  if (!instruction->NeedsMaterialization()) {
1566    return;
1567  }
1568
1569  LocationSummary* locations = instruction->GetLocations();
1570  Register lhs = InputRegisterAt(instruction, 0);
1571  Operand rhs = InputOperandAt(instruction, 1);
1572  Register res = RegisterFrom(locations->Out(), instruction->GetType());
1573  Condition cond = ARM64Condition(instruction->GetCondition());
1574
1575  __ Cmp(lhs, rhs);
1576  __ Cset(res, cond);
1577}
1578
1579#define FOR_EACH_CONDITION_INSTRUCTION(M)                                                \
1580  M(Equal)                                                                               \
1581  M(NotEqual)                                                                            \
1582  M(LessThan)                                                                            \
1583  M(LessThanOrEqual)                                                                     \
1584  M(GreaterThan)                                                                         \
1585  M(GreaterThanOrEqual)
1586#define DEFINE_CONDITION_VISITORS(Name)                                                  \
1587void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
1588void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
1589FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
1590#undef DEFINE_CONDITION_VISITORS
1591#undef FOR_EACH_CONDITION_INSTRUCTION
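// For reference, each M(Name) above expands to a pair of trivial visitors
// that forward to the shared VisitCondition code, e.g. for M(Equal):
//   void LocationsBuilderARM64::VisitEqual(HEqual* comp) { VisitCondition(comp); }
//   void InstructionCodeGeneratorARM64::VisitEqual(HEqual* comp) { VisitCondition(comp); }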
1592
1593void LocationsBuilderARM64::VisitDiv(HDiv* div) {
1594  LocationSummary* locations =
1595      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1596  switch (div->GetResultType()) {
1597    case Primitive::kPrimInt:
1598    case Primitive::kPrimLong:
1599      locations->SetInAt(0, Location::RequiresRegister());
1600      locations->SetInAt(1, Location::RequiresRegister());
1601      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1602      break;
1603
1604    case Primitive::kPrimFloat:
1605    case Primitive::kPrimDouble:
1606      locations->SetInAt(0, Location::RequiresFpuRegister());
1607      locations->SetInAt(1, Location::RequiresFpuRegister());
1608      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1609      break;
1610
1611    default:
1612      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1613  }
1614}
1615
1616void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
1617  Primitive::Type type = div->GetResultType();
1618  switch (type) {
1619    case Primitive::kPrimInt:
1620    case Primitive::kPrimLong:
1621      __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
1622      break;
1623
1624    case Primitive::kPrimFloat:
1625    case Primitive::kPrimDouble:
1626      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
1627      break;
1628
1629    default:
1630      LOG(FATAL) << "Unexpected div type " << type;
1631  }
1632}
1633
1634void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1635  LocationSummary* locations =
1636      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1637  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1638  if (instruction->HasUses()) {
1639    locations->SetOut(Location::SameAsFirstInput());
1640  }
1641}
1642
1643void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1644  SlowPathCodeARM64* slow_path =
1645      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
1646  codegen_->AddSlowPath(slow_path);
1647  Location value = instruction->GetLocations()->InAt(0);
1648
1649  Primitive::Type type = instruction->GetType();
1650
1651  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
1652    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1653    return;
1654  }
1655
1656  if (value.IsConstant()) {
1657    int64_t divisor = Int64ConstantFrom(value);
1658    if (divisor == 0) {
1659      __ B(slow_path->GetEntryLabel());
1660    } else {
1661      // A division by a non-zero constant is valid. We don't need to perform
1662      // any check, so simply fall through.
1663    }
1664  } else {
1665    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
1666  }
1667}
1668
1669void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
1670  LocationSummary* locations =
1671      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1672  locations->SetOut(Location::ConstantLocation(constant));
1673}
1674
1675void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
1676  UNUSED(constant);
1677  // Will be generated at use site.
1678}
1679
1680void LocationsBuilderARM64::VisitExit(HExit* exit) {
1681  exit->SetLocations(nullptr);
1682}
1683
1684void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
1685  UNUSED(exit);
1686}
1687
1688void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
1689  LocationSummary* locations =
1690      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1691  locations->SetOut(Location::ConstantLocation(constant));
1692}
1693
1694void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
1695  UNUSED(constant);
1696  // Will be generated at use site.
1697}
1698
1699void LocationsBuilderARM64::VisitGoto(HGoto* got) {
1700  got->SetLocations(nullptr);
1701}
1702
1703void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
1704  HBasicBlock* successor = got->GetSuccessor();
1705  DCHECK(!successor->IsExitBlock());
1706  HBasicBlock* block = got->GetBlock();
1707  HInstruction* previous = got->GetPrevious();
1708  HLoopInformation* info = block->GetLoopInformation();
1709
1710  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1711    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1712    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1713    return;
1714  }
1715  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1716    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1717  }
1718  if (!codegen_->GoesToNextBlock(block, successor)) {
1719    __ B(codegen_->GetLabelOf(successor));
1720  }
1721}
1722
1723void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
1724                                                          vixl::Label* true_target,
1725                                                          vixl::Label* false_target,
1726                                                          vixl::Label* always_true_target) {
1727  HInstruction* cond = instruction->InputAt(0);
1728  HCondition* condition = cond->AsCondition();
1729
1730  if (cond->IsIntConstant()) {
1731    int32_t cond_value = cond->AsIntConstant()->GetValue();
1732    if (cond_value == 1) {
1733      if (always_true_target != nullptr) {
1734        __ B(always_true_target);
1735      }
1736      return;
1737    } else {
1738      DCHECK_EQ(cond_value, 0);
1739    }
1740  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
1741    // The condition instruction has been materialized, compare the output to 0.
1742    Location cond_val = instruction->GetLocations()->InAt(0);
1743    DCHECK(cond_val.IsRegister());
1744    __ Cbnz(InputRegisterAt(instruction, 0), true_target);
1745  } else {
1746    // The condition instruction has not been materialized, use its inputs as
1747    // the comparison and its condition as the branch condition.
1748    Register lhs = InputRegisterAt(condition, 0);
1749    Operand rhs = InputOperandAt(condition, 1);
1750    Condition arm64_cond = ARM64Condition(condition->GetCondition());
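    // Comparisons against zero can often use a single compare-and-branch
    // (cbz/cbnz for eq/ne) or a sign-bit test (tbz/tbnz for lt/ge); gt and le
    // depend on several flags at once, so they take the generic cmp path.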
1751    if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
1752      switch (arm64_cond) {
1753        case eq:
1754          __ Cbz(lhs, true_target);
1755          break;
1756        case ne:
1757          __ Cbnz(lhs, true_target);
1758          break;
1759        case lt:
1760          // Test the sign bit and branch accordingly.
1761          __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
1762          break;
1763        case ge:
1764          // Test the sign bit and branch accordingly.
1765          __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
1766          break;
1767        default:
1768          // Without the `static_cast` the compiler throws an error for
1769          // `-Werror=sign-promo`.
1770          LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond);
1771      }
1772    } else {
1773      __ Cmp(lhs, rhs);
1774      __ B(arm64_cond, true_target);
1775    }
1776  }
1777  if (false_target != nullptr) {
1778    __ B(false_target);
1779  }
1780}
1781
1782void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
1783  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
1784  HInstruction* cond = if_instr->InputAt(0);
1785  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1786    locations->SetInAt(0, Location::RequiresRegister());
1787  }
1788}
1789
1790void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
1791  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1792  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1793  vixl::Label* always_true_target = true_target;
1794  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1795                                if_instr->IfTrueSuccessor())) {
1796    always_true_target = nullptr;
1797  }
1798  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1799                                if_instr->IfFalseSuccessor())) {
1800    false_target = nullptr;
1801  }
1802  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1803}
1804
1805void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
1806  LocationSummary* locations = new (GetGraph()->GetArena())
1807      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1808  HInstruction* cond = deoptimize->InputAt(0);
1809  DCHECK(cond->IsCondition());
1810  if (cond->AsCondition()->NeedsMaterialization()) {
1811    locations->SetInAt(0, Location::RequiresRegister());
1812  }
1813}
1814
1815void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
1816  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
1817      DeoptimizationSlowPathARM64(deoptimize);
1818  codegen_->AddSlowPath(slow_path);
1819  vixl::Label* slow_path_entry = slow_path->GetEntryLabel();
1820  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1821}
1822
1823void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
1824  HandleFieldGet(instruction);
1825}
1826
1827void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
1828  HandleFieldGet(instruction, instruction->GetFieldInfo());
1829}
1830
1831void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
1832  HandleFieldSet(instruction);
1833}
1834
1835void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
1836  HandleFieldSet(instruction, instruction->GetFieldInfo());
1837}
1838
1839void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
1840  LocationSummary::CallKind call_kind =
1841      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
1842  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
1843  locations->SetInAt(0, Location::RequiresRegister());
1844  locations->SetInAt(1, Location::RequiresRegister());
1845  // The output overlaps the inputs: the register is reused to load the object's class.
1846  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1847}
1848
1849void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
1850  LocationSummary* locations = instruction->GetLocations();
1851  Register obj = InputRegisterAt(instruction, 0);
1852  Register cls = InputRegisterAt(instruction, 1);
1853  Register out = OutputRegister(instruction);
1854
1855  vixl::Label done;
1856
1857  // Return 0 if `obj` is null.
1858  // Avoid null check if we know `obj` is not null.
1859  if (instruction->MustDoNullCheck()) {
1860    __ Mov(out, 0);
1861    __ Cbz(obj, &done);
1862  }
1863
1864  // Compare the class of `obj` with `cls`.
1865  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
1866  __ Cmp(out, cls);
1867  if (instruction->IsClassFinal()) {
1868    // Classes must be equal for the instanceof to succeed.
1869    __ Cset(out, eq);
1870  } else {
1871    // If the classes are not equal, we go into a slow path.
1872    DCHECK(locations->OnlyCallsOnSlowPath());
1873    SlowPathCodeARM64* slow_path =
1874        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
1875            instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
1876    codegen_->AddSlowPath(slow_path);
1877    __ B(ne, slow_path->GetEntryLabel());
1878    __ Mov(out, 1);
1879    __ Bind(slow_path->GetExitLabel());
1880  }
1881
1882  __ Bind(&done);
1883}
1884
1885void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
1886  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
1887  locations->SetOut(Location::ConstantLocation(constant));
1888}
1889
1890void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
1891  // Will be generated at use site.
1892  UNUSED(constant);
1893}
1894
1895void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
1896  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
1897  locations->SetOut(Location::ConstantLocation(constant));
1898}
1899
1900void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
1901  // Will be generated at use site.
1902  UNUSED(constant);
1903}
1904
1905void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
1906  LocationSummary* locations =
1907      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
1908  locations->AddTemp(LocationFrom(x0));
1909
1910  InvokeDexCallingConventionVisitor calling_convention_visitor;
1911  for (size_t i = 0; i < invoke->InputCount(); i++) {
1912    HInstruction* input = invoke->InputAt(i);
1913    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
1914  }
1915
1916  Primitive::Type return_type = invoke->GetType();
1917  if (return_type != Primitive::kPrimVoid) {
1918    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
1919  }
1920}
1921
1922void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
1923  HandleInvoke(invoke);
1924}
1925
1926void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
1927  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
1928  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
1929  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
1930          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
1931  Location receiver = invoke->GetLocations()->InAt(0);
1932  Offset class_offset = mirror::Object::ClassOffset();
1933  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
1934
1935  // The register ip1 is required to be used for the hidden argument in
1936  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
1937  MacroAssembler* masm = GetVIXLAssembler();
1938  UseScratchRegisterScope scratch_scope(masm);
1939  BlockPoolsScope block_pools(masm);
1940  scratch_scope.Exclude(ip1);
1941  __ Mov(ip1, invoke->GetDexMethodIndex());
1942
1943  // temp = object->GetClass();
1944  if (receiver.IsStackSlot()) {
1945    __ Ldr(temp, StackOperandFrom(receiver));
1946    __ Ldr(temp, HeapOperand(temp, class_offset));
1947  } else {
1948    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
1949  }
1950  codegen_->MaybeRecordImplicitNullCheck(invoke);
1951  // temp = temp->GetImtEntryAt(method_offset);
1952  __ Ldr(temp, HeapOperand(temp, method_offset));
1953  // lr = temp->GetEntryPoint();
1954  __ Ldr(lr, HeapOperand(temp, entry_point));
1955  // lr();
1956  __ Blr(lr);
1957  DCHECK(!codegen_->IsLeafMethod());
1958  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1959}
1960
1961void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1962  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
1963  if (intrinsic.TryDispatch(invoke)) {
1964    return;
1965  }
1966
1967  HandleInvoke(invoke);
1968}
1969
1970void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1971  // Explicit clinit checks triggered by static invokes must have been
1972  // pruned by art::PrepareForRegisterAllocation, but this step is not
1973  // run in baseline. So we remove them manually here if we find them.
1974  // TODO: Instead of this local workaround, address this properly.
1975  if (invoke->IsStaticWithExplicitClinitCheck()) {
1976    invoke->RemoveClinitCheckOrLoadClassAsLastInput();
1977  }
1978
1979  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
1980  if (intrinsic.TryDispatch(invoke)) {
1981    return;
1982  }
1983
1984  HandleInvoke(invoke);
1985}
1986
1987static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
1988  if (invoke->GetLocations()->Intrinsified()) {
1989    IntrinsicCodeGeneratorARM64 intrinsic(codegen);
1990    intrinsic.Dispatch(invoke);
1991    return true;
1992  }
1993  return false;
1994}
1995
1996void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
1997  // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
1998  DCHECK(temp.Is(kArtMethodRegister));
1999  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
2000      invoke->GetDexMethodIndex() * kHeapRefSize;
2001
2002  // TODO: Implement all kinds of calls:
2003  // 1) boot -> boot
2004  // 2) app -> boot
2005  // 3) app -> app
2006  //
2007  // Currently we implement the app -> app logic, which looks up in the resolve cache.
2008
2009  // temp = method;
2010  LoadCurrentMethod(temp);
2011  if (!invoke->IsRecursive()) {
2012    // temp = temp->dex_cache_resolved_methods_;
2013    __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
2014    // temp = temp[index_in_cache];
2015    __ Ldr(temp, HeapOperand(temp, index_in_cache));
2016    // lr = temp->entry_point_from_quick_compiled_code_;
2017    __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2018        kArm64WordSize)));
2019    // lr();
2020    __ Blr(lr);
2021  } else {
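    // A direct recursive call can branch straight to this method's own frame
    // entry, skipping the dex cache lookup entirely.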
2022    __ Bl(&frame_entry_label_);
2023  }
2024
2025  DCHECK(!IsLeafMethod());
2026}
2027
2028void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2029  // Explicit clinit checks triggered by static invokes must have been
2030  // pruned by art::PrepareForRegisterAllocation.
2031  DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
2032
2033  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2034    return;
2035  }
2036
2037  BlockPoolsScope block_pools(GetVIXLAssembler());
2038  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
2039  codegen_->GenerateStaticOrDirectCall(invoke, temp);
2040  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2041}
2042
2043void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2044  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2045    return;
2046  }
2047
2048  LocationSummary* locations = invoke->GetLocations();
2049  Location receiver = locations->InAt(0);
2050  Register temp = WRegisterFrom(locations->GetTemp(0));
2051  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
2052    invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
2053  Offset class_offset = mirror::Object::ClassOffset();
2054  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2055
2056  BlockPoolsScope block_pools(GetVIXLAssembler());
2057
2058  // temp = object->GetClass();
2059  if (receiver.IsStackSlot()) {
2060    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
2061    __ Ldr(temp, HeapOperand(temp, class_offset));
2062  } else {
2063    DCHECK(receiver.IsRegister());
2064    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
2065  }
2066  codegen_->MaybeRecordImplicitNullCheck(invoke);
2067  // temp = temp->GetMethodAt(method_offset);
2068  __ Ldr(temp, HeapOperand(temp, method_offset));
2069  // lr = temp->GetEntryPoint();
2070  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
2071  // lr();
2072  __ Blr(lr);
2073  DCHECK(!codegen_->IsLeafMethod());
2074  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2075}
2076
2077void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
2078  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2079                                                              : LocationSummary::kNoCall;
2080  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2081  locations->SetOut(Location::RequiresRegister());
2082}
2083
2084void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
2085  Register out = OutputRegister(cls);
2086  if (cls->IsReferrersClass()) {
2087    DCHECK(!cls->CanCallRuntime());
2088    DCHECK(!cls->MustGenerateClinitCheck());
2089    codegen_->LoadCurrentMethod(out);
2090    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
2091  } else {
2092    DCHECK(cls->CanCallRuntime());
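    // Load the type from the current method's dex cache. A null entry means
    // the class is not resolved yet, which the slow path handles.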
2093    codegen_->LoadCurrentMethod(out);
2094    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
2095    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
2096
2097    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
2098        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
2099    codegen_->AddSlowPath(slow_path);
2100    __ Cbz(out, slow_path->GetEntryLabel());
2101    if (cls->MustGenerateClinitCheck()) {
2102      GenerateClassInitializationCheck(slow_path, out);
2103    } else {
2104      __ Bind(slow_path->GetExitLabel());
2105    }
2106  }
2107}
2108
2109void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
2110  LocationSummary* locations =
2111      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2112  locations->SetOut(Location::RequiresRegister());
2113}
2114
2115void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
2116  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
2117  __ Ldr(OutputRegister(instruction), exception);
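  // Clear the exception field now that the exception has been consumed.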
2118  __ Str(wzr, exception);
2119}
2120
2121void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
2122  load->SetLocations(nullptr);
2123}
2124
2125void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
2126  // Nothing to do, this is driven by the code generator.
2127  UNUSED(load);
2128}
2129
2130void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
2131  LocationSummary* locations =
2132      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2133  locations->SetOut(Location::RequiresRegister());
2134}
2135
2136void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
2137  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
2138  codegen_->AddSlowPath(slow_path);
2139
2140  Register out = OutputRegister(load);
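  // Walk current method -> declaring class -> dex cache strings, then index
  // into the cache; a null entry sends us to the slow path to resolve it.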
2141  codegen_->LoadCurrentMethod(out);
2142  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
2143  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
2144  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
2145  __ Cbz(out, slow_path->GetEntryLabel());
2146  __ Bind(slow_path->GetExitLabel());
2147}
2148
2149void LocationsBuilderARM64::VisitLocal(HLocal* local) {
2150  local->SetLocations(nullptr);
2151}
2152
2153void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
2154  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2155}
2156
2157void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
2158  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2159  locations->SetOut(Location::ConstantLocation(constant));
2160}
2161
2162void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
2163  // Will be generated at use site.
2164  UNUSED(constant);
2165}
2166
2167void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2168  LocationSummary* locations =
2169      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2170  InvokeRuntimeCallingConvention calling_convention;
2171  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2172}
2173
2174void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2175  codegen_->InvokeRuntime(instruction->IsEnter()
2176        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
2177      instruction,
2178      instruction->GetDexPc(),
2179      nullptr);
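  // pLockObject and pUnlockObject share the same signature, so checking one
  // of them here covers both entrypoints.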
2180  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2181}
2182
2183void LocationsBuilderARM64::VisitMul(HMul* mul) {
2184  LocationSummary* locations =
2185      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2186  switch (mul->GetResultType()) {
2187    case Primitive::kPrimInt:
2188    case Primitive::kPrimLong:
2189      locations->SetInAt(0, Location::RequiresRegister());
2190      locations->SetInAt(1, Location::RequiresRegister());
2191      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2192      break;
2193
2194    case Primitive::kPrimFloat:
2195    case Primitive::kPrimDouble:
2196      locations->SetInAt(0, Location::RequiresFpuRegister());
2197      locations->SetInAt(1, Location::RequiresFpuRegister());
2198      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2199      break;
2200
2201    default:
2202      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2203  }
2204}
2205
2206void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
2207  switch (mul->GetResultType()) {
2208    case Primitive::kPrimInt:
2209    case Primitive::kPrimLong:
2210      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
2211      break;
2212
2213    case Primitive::kPrimFloat:
2214    case Primitive::kPrimDouble:
2215      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
2216      break;
2217
2218    default:
2219      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2220  }
2221}
2222
2223void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
2224  LocationSummary* locations =
2225      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2226  switch (neg->GetResultType()) {
2227    case Primitive::kPrimInt:
2228    case Primitive::kPrimLong:
2229      locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
2230      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2231      break;
2232
2233    case Primitive::kPrimFloat:
2234    case Primitive::kPrimDouble:
2235      locations->SetInAt(0, Location::RequiresFpuRegister());
2236      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2237      break;
2238
2239    default:
2240      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2241  }
2242}
2243
2244void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
2245  switch (neg->GetResultType()) {
2246    case Primitive::kPrimInt:
2247    case Primitive::kPrimLong:
2248      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
2249      break;
2250
2251    case Primitive::kPrimFloat:
2252    case Primitive::kPrimDouble:
2253      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
2254      break;
2255
2256    default:
2257      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2258  }
2259}
2260
2261void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
2262  LocationSummary* locations =
2263      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2264  InvokeRuntimeCallingConvention calling_convention;
2265  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2266  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
2267  locations->SetOut(LocationFrom(x0));
2268  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2269  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2270                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
2271}
2272
2273void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
2274  LocationSummary* locations = instruction->GetLocations();
2275  InvokeRuntimeCallingConvention calling_convention;
2276  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2277  DCHECK(type_index.Is(w0));
2278  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
2279  DCHECK(current_method.Is(w2));
2280  codegen_->LoadCurrentMethod(current_method);
2281  __ Mov(type_index, instruction->GetTypeIndex());
2282  codegen_->InvokeRuntime(
2283      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2284      instruction,
2285      instruction->GetDexPc(),
2286      nullptr);
2287  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2288                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
2289}
2290
2291void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
2292  LocationSummary* locations =
2293      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2294  InvokeRuntimeCallingConvention calling_convention;
2295  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2296  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
2297  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2298  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
2299}
2300
2301void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
2302  LocationSummary* locations = instruction->GetLocations();
2303  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2304  DCHECK(type_index.Is(w0));
2305  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
2306  DCHECK(current_method.Is(w1));
2307  codegen_->LoadCurrentMethod(current_method);
2308  __ Mov(type_index, instruction->GetTypeIndex());
2309  codegen_->InvokeRuntime(
2310      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2311      instruction,
2312      instruction->GetDexPc(),
2313      nullptr);
2314  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
2315}
2316
2317void LocationsBuilderARM64::VisitNot(HNot* instruction) {
2318  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2319  locations->SetInAt(0, Location::RequiresRegister());
2320  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2321}
2322
2323void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
2324  switch (instruction->GetResultType()) {
2325    case Primitive::kPrimInt:
2326    case Primitive::kPrimLong:
2327      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
2328      break;
2329
2330    default:
2331      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2332  }
2333}
2334
2335void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
2336  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2337  locations->SetInAt(0, Location::RequiresRegister());
2338  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2339}
2340
2341void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
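  // Booleans are materialized as 0 or 1, so logical negation is a single bit
  // flip: out = in ^ 1.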
2342  __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
2343}
2344
2345void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
2346  LocationSummary* locations =
2347      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2348  locations->SetInAt(0, Location::RequiresRegister());
2349  if (instruction->HasUses()) {
2350    locations->SetOut(Location::SameAsFirstInput());
2351  }
2352}
2353
2354void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2355  if (codegen_->CanMoveNullCheckToUser(instruction)) {
2356    return;
2357  }
2358
2359  BlockPoolsScope block_pools(GetVIXLAssembler());
2360  Location obj = instruction->GetLocations()->InAt(0);
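  // Loading from the object's address faults if the reference is null; the
  // runtime's fault handler turns the resulting signal into a
  // NullPointerException, so no explicit comparison is needed here.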
2361  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
2362  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2363}
2364
2365void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2366  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
2367  codegen_->AddSlowPath(slow_path);
2368
2369  LocationSummary* locations = instruction->GetLocations();
2370  Location obj = locations->InAt(0);
2371
2372  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
2373}
2374
2375void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
2376  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2377    GenerateImplicitNullCheck(instruction);
2378  } else {
2379    GenerateExplicitNullCheck(instruction);
2380  }
2381}
2382
2383void LocationsBuilderARM64::VisitOr(HOr* instruction) {
2384  HandleBinaryOp(instruction);
2385}
2386
2387void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
2388  HandleBinaryOp(instruction);
2389}
2390
2391void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2392  LOG(FATAL) << "Unreachable";
2393}
2394
2395void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
2396  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2397}
2398
2399void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
2400  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2401  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2402  if (location.IsStackSlot()) {
2403    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2404  } else if (location.IsDoubleStackSlot()) {
2405    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2406  }
2407  locations->SetOut(location);
2408}
2409
2410void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
2411  // Nothing to do, the parameter is already at its location.
2412  UNUSED(instruction);
2413}
2414
2415void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
2416  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2417  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2418    locations->SetInAt(i, Location::Any());
2419  }
2420  locations->SetOut(Location::Any());
2421}
2422
2423void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
2424  UNUSED(instruction);
2425  LOG(FATAL) << "Unreachable";
2426}
2427
2428void LocationsBuilderARM64::VisitRem(HRem* rem) {
2429  Primitive::Type type = rem->GetResultType();
2430  LocationSummary::CallKind call_kind =
2431      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
2432  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2433
2434  switch (type) {
2435    case Primitive::kPrimInt:
2436    case Primitive::kPrimLong:
2437      locations->SetInAt(0, Location::RequiresRegister());
2438      locations->SetInAt(1, Location::RequiresRegister());
2439      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2440      break;
2441
2442    case Primitive::kPrimFloat:
2443    case Primitive::kPrimDouble: {
2444      InvokeRuntimeCallingConvention calling_convention;
2445      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
2446      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
2447      locations->SetOut(calling_convention.GetReturnLocation(type));
2448
2449      break;
2450    }
2451
2452    default:
2453      LOG(FATAL) << "Unexpected rem type " << type;
2454  }
2455}
2456
2457void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
2458  Primitive::Type type = rem->GetResultType();
2459
2460  switch (type) {
2461    case Primitive::kPrimInt:
2462    case Primitive::kPrimLong: {
2463      UseScratchRegisterScope temps(GetVIXLAssembler());
2464      Register dividend = InputRegisterAt(rem, 0);
2465      Register divisor = InputRegisterAt(rem, 1);
2466      Register output = OutputRegister(rem);
2467      Register temp = temps.AcquireSameSizeAs(output);
2468
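      // ARM64 has no integer remainder instruction; compute
      // output = dividend - (dividend / divisor) * divisor with sdiv + msub.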
2469      __ Sdiv(temp, dividend, divisor);
2470      __ Msub(output, temp, divisor, dividend);
2471      break;
2472    }
2473
2474    case Primitive::kPrimFloat:
2475    case Primitive::kPrimDouble: {
2476      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
2477                                                             : QUICK_ENTRY_POINT(pFmod);
2478      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
2479      break;
2480    }
2481
2482    default:
2483      LOG(FATAL) << "Unexpected rem type " << type;
2484  }
2485}
2486
2487void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
2488  memory_barrier->SetLocations(nullptr);
2489}
2490
2491void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
2492  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
2493}
2494
2495void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
2496  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2497  Primitive::Type return_type = instruction->InputAt(0)->GetType();
2498  locations->SetInAt(0, ARM64ReturnLocation(return_type));
2499}
2500
2501void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
2502  UNUSED(instruction);
2503  codegen_->GenerateFrameExit();
2504}
2505
2506void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
2507  instruction->SetLocations(nullptr);
2508}
2509
2510void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
2511  UNUSED(instruction);
2512  codegen_->GenerateFrameExit();
2513}
2514
2515void LocationsBuilderARM64::VisitShl(HShl* shl) {
2516  HandleShift(shl);
2517}
2518
2519void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
2520  HandleShift(shl);
2521}
2522
2523void LocationsBuilderARM64::VisitShr(HShr* shr) {
2524  HandleShift(shr);
2525}
2526
2527void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
2528  HandleShift(shr);
2529}
2530
2531void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
2532  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
2533  Primitive::Type field_type = store->InputAt(1)->GetType();
2534  switch (field_type) {
2535    case Primitive::kPrimNot:
2536    case Primitive::kPrimBoolean:
2537    case Primitive::kPrimByte:
2538    case Primitive::kPrimChar:
2539    case Primitive::kPrimShort:
2540    case Primitive::kPrimInt:
2541    case Primitive::kPrimFloat:
2542      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
2543      break;
2544
2545    case Primitive::kPrimLong:
2546    case Primitive::kPrimDouble:
2547      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
2548      break;
2549
2550    default:
2551      LOG(FATAL) << "Unimplemented local type " << field_type;
2552  }
2553}
2554
2555void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
2556  UNUSED(store);
2557}
2558
2559void LocationsBuilderARM64::VisitSub(HSub* instruction) {
2560  HandleBinaryOp(instruction);
2561}
2562
2563void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
2564  HandleBinaryOp(instruction);
2565}
2566
2567void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
2568  HandleFieldGet(instruction);
2569}
2570
2571void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
2572  HandleFieldGet(instruction, instruction->GetFieldInfo());
2573}
2574
2575void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
2576  HandleFieldSet(instruction);
2577}
2578
2579void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
2580  HandleFieldSet(instruction, instruction->GetFieldInfo());
2581}
2582
2583void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
2584  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
2585}
2586
2587void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
2588  HBasicBlock* block = instruction->GetBlock();
2589  if (block->GetLoopInformation() != nullptr) {
2590    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
2591    // The back edge will generate the suspend check.
2592    return;
2593  }
2594  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
2595    // The goto will generate the suspend check.
2596    return;
2597  }
2598  GenerateSuspendCheck(instruction, nullptr);
2599}
2600
2601void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
2602  temp->SetLocations(nullptr);
2603}
2604
2605void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
2606  // Nothing to do, this is driven by the code generator.
2607  UNUSED(temp);
2608}
2609
2610void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
2611  LocationSummary* locations =
2612      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2613  InvokeRuntimeCallingConvention calling_convention;
2614  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2615}
2616
2617void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
2618  codegen_->InvokeRuntime(
2619      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
2620  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
2621}
2622
2623void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
2624  LocationSummary* locations =
2625      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
2626  Primitive::Type input_type = conversion->GetInputType();
2627  Primitive::Type result_type = conversion->GetResultType();
2628  DCHECK_NE(input_type, result_type);
2629  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
2630      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
2631    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
2632  }
2633
2634  if (Primitive::IsFloatingPointType(input_type)) {
2635    locations->SetInAt(0, Location::RequiresFpuRegister());
2636  } else {
2637    locations->SetInAt(0, Location::RequiresRegister());
2638  }
2639
2640  if (Primitive::IsFloatingPointType(result_type)) {
2641    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2642  } else {
2643    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2644  }
2645}
2646
2647void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
2648  Primitive::Type result_type = conversion->GetResultType();
2649  Primitive::Type input_type = conversion->GetInputType();
2650
2651  DCHECK_NE(input_type, result_type);
2652
2653  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
2654    int result_size = Primitive::ComponentSize(result_type);
2655    int input_size = Primitive::ComponentSize(input_type);
2656    int min_size = std::min(result_size, input_size);
2657    Register output = OutputRegister(conversion);
2658    Register source = InputRegisterAt(conversion, 0);
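    // Integral conversions are bit-field extracts: conversions to kPrimChar,
    // or widening from it, zero-extend (ubfx) since char is the only unsigned
    // type; everything else sign-extends (sbfx) from the narrower width.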
2659    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
2660      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
2661    } else if ((result_type == Primitive::kPrimChar) ||
2662               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
2663      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
2664    } else {
2665      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
2666    }
2667  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
2668    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
2669  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
2670    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
2671    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
2672  } else if (Primitive::IsFloatingPointType(result_type) &&
2673             Primitive::IsFloatingPointType(input_type)) {
2674    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
2675  } else {
2676    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
2677                << " to " << result_type;
2678  }
2679}
2680
2681void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
2682  HandleShift(ushr);
2683}
2684
2685void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
2686  HandleShift(ushr);
2687}
2688
2689void LocationsBuilderARM64::VisitXor(HXor* instruction) {
2690  HandleBinaryOp(instruction);
2691}
2692
2693void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
2694  HandleBinaryOp(instruction);
2695}
2696
2697void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
2698  // Nothing to do, this should be removed during prepare for register allocator.
2699  UNUSED(instruction);
2700  LOG(FATAL) << "Unreachable";
2701}
2702
2703void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
2704  // Nothing to do, this should be removed during prepare for register allocator.
2705  UNUSED(instruction);
2706  LOG(FATAL) << "Unreachable";
2707}
2708
2709#undef __
2710#undef QUICK_ENTRY_POINT
2711
2712}  // namespace arm64
2713}  // namespace art
2714