code_generator_arm64.cc revision 418318f4d50e0cfc2d54330d7623ee030d4d727d
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"


using namespace vixl;   // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
using helpers::ArtVixlRegCodeCoherentForRegSet;

static constexpr int kCurrentMethodStackOffset = 0;
// The compare/jump sequence will generate about (2 * num_entries + 1) instructions, while the
// jump table version generates 7 instructions and num_entries literals. The compare/jump
// sequence generates less code/data with a small num_entries.
static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
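// Illustration of the trade-off above, taking the estimates at face value: with 4-byte
// instructions and 4-byte literals, the compare/jump sequence costs (2 * num_entries + 1) * 4
// bytes versus 7 * 4 + num_entries * 4 bytes for the table, and the two are equal at
// num_entries == 6 (52 bytes each), which is where the threshold is set.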

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    case kCondB:  return lo;
    case kCondBE: return ls;
    case kCondA:  return hi;
    case kCondAE: return hs;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else if (return_type == Primitive::kPrimVoid) {
    return Location::NoLocation();
  } else {
    return LocationFrom(w0);
  }
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

// Calculate the memory operand used to save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
                                           RegisterSet* register_set,
                                           int64_t spill_offset,
                                           bool is_save) {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
                                         codegen->GetNumberOfCoreRegisters(),
                                         register_set->GetFloatingPointRegisters(),
                                         codegen->GetNumberOfFloatingPointRegisters()));

  CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
      register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
  CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
      register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));

  MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
  UseScratchRegisterScope temps(masm);

  Register base = masm->StackPointer();
  int64_t core_spill_size = core_list.TotalSizeInBytes();
  int64_t fp_spill_size = fp_list.TotalSizeInBytes();
  int64_t reg_size = kXRegSizeInBytes;
  int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
  uint32_t ls_access_size = WhichPowerOf2(reg_size);
  if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
      !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
    // If the offset does not fit in the instruction's immediate field, use an alternate register
    // to compute the base address (the base address of the floating point register spills).
    Register new_base = temps.AcquireSameSizeAs(base);
    __ Add(new_base, base, Operand(spill_offset + core_spill_size));
    base = new_base;
    spill_offset = -core_spill_size;
    int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
    DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
    DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
  }

  if (is_save) {
    __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
    __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  } else {
    __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
    __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  }
}
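// A minimal sketch of what the helper above emits, assuming the live set {w0, w1, s0} and a
// spill_offset of 16 (registers and offsets are illustrative): the lists are built with X/D
// sizes and VIXL pairs adjacent registers where possible, giving roughly
//   stp x0, x1, [sp, #16]
//   str d0, [sp, #32]
// for a save, and the matching ldp/ldr sequence for a restore.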

void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
      // If the register holds an object, update the stack mask.
      if (locations->RegisterContainsObject(i)) {
        locations->SetStackBit(stack_offset / kVRegSize);
      }
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_core_stack_offsets_[i] = stack_offset;
      stack_offset += kXRegSizeInBytes;
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
        register_set->ContainsFloatingPointRegister(i)) {
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_fpu_stack_offsets_[i] = stack_offset;
      stack_offset += kDRegSizeInBytes;
    }
  }

  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
}

void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
}

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
        locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
      : instruction_(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location class_to_check = locations->InAt(1);
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    uint32_t dex_pc = instruction_->GetDexPc();

    __ Bind(GetEntryLabel());

    if (instruction_->IsCheckCast()) {
      // The codegen for the instruction overwrites `temp`, so put it back in place.
      Register obj = InputRegisterAt(instruction_, 0);
      Register temp = WRegisterFrom(locations->GetTemp(0));
      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
      __ Ldr(temp, HeapOperand(obj, class_offset));
      arm64_codegen->GetAssembler()->MaybeUnpoisonHeapReference(temp);
    }

    if (!is_fatal_) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
        object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ B(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
  bool IsFatal() const { return is_fatal_; }

 private:
  HInstruction* const instruction_;
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};

class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit ArraySetSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    parallel_move.AddMove(
        locations->InAt(0),
        LocationFrom(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(1),
        LocationFrom(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(2),
        LocationFrom(calling_convention.GetRegisterAt(2)),
        Primitive::kPrimNot,
        nullptr);
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);

    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
};

void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
  uint32_t num_entries = switch_instr_->GetNumEntries();
  DCHECK_GE(num_entries, kPackedSwitchJumpTableThreshold);

  // We are about to use the assembler to place literals directly. Make sure we have enough
  // space in the underlying code buffer and that the jump table has been generated with the
  // right size.
  CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t),
                             CodeBufferCheckScope::kCheck, CodeBufferCheckScope::kExactSize);

  __ Bind(&table_start_);
  const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
  for (uint32_t i = 0; i < num_entries; i++) {
    vixl::Label* target_label = codegen->GetLabelOf(successors[i]);
    DCHECK(target_label->IsBound());
    ptrdiff_t jump_offset = target_label->location() - table_start_.location();
    DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
    DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
    Literal<int32_t> literal(jump_offset);
    __ place(&literal);
  }
}
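// Each table entry is thus a signed 32-bit offset from table_start_ to the start of the
// corresponding successor block; the dispatch code emitted for HPackedSwitch is expected to
// load the selected entry and add it to the table address to form the branch target.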

#undef __

Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
  return next_location;
}
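// Illustrative example (not from the original source): for an (int, float, long) signature
// this visitor hands out GetRegisterAt(0) for the int, GetFpuRegisterAt(0) for the float, and
// GetRegisterAt(1) for the long, while stack_index_ advances by 1 + 1 + 2 vreg slots so that
// any stack-passed tail arguments land at the right offsets.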

Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
  return LocationFrom(kArtMethodRegister);
}

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                                       const Arm64InstructionSetFeatures& isa_features,
                                       const CompilerOptions& compiler_options,
                                       OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    callee_saved_core_registers.list(),
                    callee_saved_fp_registers.list(),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features),
      uint64_literals_(std::less<uint64_t>(),
                       graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      method_patches_(MethodReferenceComparator(),
                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      call_patches_(MethodReferenceComparator(),
                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(LocationFrom(lr));
}

#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::EmitJumpTables() {
  for (auto jump_table : jump_tables_) {
    jump_table->EmitTable(this);
  }
}

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  EmitJumpTables();
  // Ensure we emit the literal pool.
  __ FinalizeCode();

  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
  // Note: There are 6 kinds of moves:
  // 1. constant -> GPR/FPR (non-cycle)
  // 2. constant -> stack (non-cycle)
  // 3. GPR/FPR -> GPR/FPR
  // 4. GPR/FPR -> stack
  // 5. stack -> GPR/FPR
  // 6. stack -> stack (non-cycle)
  // Cases 1, 2 and 6 should never be included in a dependency cycle on ARM64. For cases 3, 4
  // and 5, VIXL uses at most 1 GPR. VIXL has 2 GPR and 1 FPR temps, and there should be no
  // intersecting cycles on ARM64, so we always have 1 GPR and 1 FPR VIXL temp available to
  // resolve the dependency.
  vixl_temps_.Open(GetVIXLAssembler());
}

void ParallelMoveResolverARM64::FinishEmitNativeCode() {
  vixl_temps_.Close();
}

Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
  DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
         kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
  kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
  Location scratch = GetScratchLocation(kind);
  if (!scratch.Equals(Location::NoLocation())) {
    return scratch;
  }
  // Allocate from VIXL temp registers.
  if (kind == Location::kRegister) {
    scratch = LocationFrom(vixl_temps_.AcquireX());
  } else {
    DCHECK(kind == Location::kFpuRegister);
    scratch = LocationFrom(vixl_temps_.AcquireD());
  }
  AddScratchLocation(scratch);
  return scratch;
}

void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
  if (loc.IsRegister()) {
    vixl_temps_.Release(XRegisterFrom(loc));
  } else {
    DCHECK(loc.IsFpuRegister());
    vixl_temps_.Release(DRegisterFrom(loc));
  }
  RemoveScratchLocation(loc);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    // Stack layout:
    //      sp[frame_size - 8]        : lr.
    //      ...                       : other preserved core registers.
    //      ...                       : other preserved fp registers.
    //      ...                       : reserved frame space.
    //      sp[0]                     : current method.
    __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
    GetAssembler()->cfi().AdjustCFAOffset(frame_size);
    GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
  }
}
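// A minimal sketch of the prologue emitted above, assuming a 64-byte frame whose core spill
// set is {x20, lr} (sizes and registers are illustrative): kArtMethodRegister is stored with a
// pre-indexed str that also allocates the frame, then SpillRegisters pairs the preserved
// registers at the top of the frame, giving roughly
//   str x0, [sp, #-64]!
//   stp x20, lr, [sp, #48]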

void CodeGeneratorARM64::GenerateFrameExit() {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  GetAssembler()->cfi().RememberState();
  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
    GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    __ Drop(frame_size);
    GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
  }
  __ Ret();
  GetAssembler()->cfi().RestoreState();
  GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
  return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
                          core_spill_mask_);
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
                                         GetNumberOfFloatingPointRegisters()));
  return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
                          fpu_spill_mask_);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsFakeString()) {
    // The fake string is an alias for null.
    DCHECK(IsBaseline());
    instruction = locations->Out().GetConstant();
    DCHECK(instruction->IsNullConstant()) << instruction->DebugName();
  }

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location,
                 Location::DoubleStackSlot(kCurrentMethodStackOffset),
                 Primitive::kPrimVoid);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    int64_t value = GetInt64ValueOf(instruction->AsConstant());
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
          ? temps.AcquireW()
          : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }

  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
}

void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();   // Index within the CardTable - 32bit.
  vixl::Label done;
  if (value_can_be_null) {
    __ Cbz(value, &done);
  }
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  if (value_can_be_null) {
    __ Bind(&done);
  }
}
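// Note on the strb above: the card table base register doubles as the value written. The
// runtime biases the card table base so that its least significant byte equals the dirty card
// value, which is what makes `strb card, [card, temp]` mark the card covering `object` as
// dirty (see gc/accounting/card_table.h).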

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
  // Blocked core registers:
  //      lr        : Runtime reserved.
  //      tr        : Runtime reserved.
  //      xSuspend  : Runtime reserved. TODO: Unblock this when the runtime stops using it.
  //      ip1       : VIXL core temp.
  //      ip0       : VIXL core temp.
  //
  // Blocked fp registers:
  //      d31       : VIXL fp temp.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }

  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }

  if (is_baseline) {
    CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
    while (!reserved_core_baseline_registers.IsEmpty()) {
      blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
    }
  }

  if (is_baseline || GetGraph()->IsDebuggable()) {
    // Stubs do not save callee-save floating point registers. If the graph
    // is debuggable, we need to deal with these registers differently. For
    // now, just block them.
    CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
    while (!reserved_fp_baseline_registers.IsEmpty()) {
      blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
    }
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << XRegister(reg);
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << DRegister(reg);
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant()) {
    __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
  } else if (constant->IsLongConstant()) {
    __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
  } else if (constant->IsNullConstant()) {
    __ Mov(Register(destination), 0);
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}


static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         // Null is mapped to a core W register, which we associate with kPrimInt.
         (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination,
                                      Location source,
                                      Primitive::Type dst_type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (dst_type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Otherwise the source is a register, and since
        // the type has not been specified, we choose a 64bit type to force
        // a 64bit move.
        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
    CPURegister dst = CPURegisterFrom(destination, dst_type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, dst_type));
      MoveConstant(dst, source.GetConstant());
    } else if (source.IsRegister()) {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, dst_type));
      } else {
        DCHECK(destination.IsFpuRegister());
        Primitive::Type source_type = Primitive::Is64BitType(dst_type)
            ? Primitive::kPrimLong
            : Primitive::kPrimInt;
        __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
      }
    } else {
      DCHECK(source.IsFpuRegister());
      if (destination.IsRegister()) {
        Primitive::Type source_type = Primitive::Is64BitType(dst_type)
            ? Primitive::kPrimDouble
            : Primitive::kPrimFloat;
        __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
      } else {
        DCHECK(destination.IsFpuRegister());
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
      __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
          << source << " " << dst_type;
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}
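// Worked example of the constant-to-stack path above (values are illustrative): moving a long
// constant to a double stack slot acquires an X scratch register, materializes the constant
// with MoveConstant(), and stores it to the destination slot with a single str.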

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  UseScratchRegisterScope temps(masm);
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}
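// The detour through a core temp for float/double above reflects that the A64 load-acquire
// instructions (ldar/ldarb/ldarh) only target general-purpose registers: the value is acquired
// into a W/X temp and then moved to the FP register with fmov.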

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kArm64WordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  BlockPoolsScope block_pools(GetVIXLAssembler());
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  RecordPcInfo(instruction, dex_pc, slow_path);
}
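// In the call sequence above, lr serves as the scratch register holding the entrypoint pointer
// loaded from the current Thread (tr); blr then both branches to the entrypoint and deposits
// the return address back into lr.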

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  if (use_acquire_release) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}
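// Note that kAnyStore conservatively maps to a full barrier above: A64's write-only dmb
// variant only orders stores against stores, so it cannot provide the loads-and-stores-before-
// stores ordering that kAnyStore requires.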

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}
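// The ldrh above reads the 16-bit thread flags from the Thread object; any non-zero value
// (a pending suspend or checkpoint request) routes execution through the slow path, which
// calls pTestSuspend.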

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) {  \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
  }                                                                                   \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
                                                   const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  Primitive::Type field_type = field_info.GetFieldType();
  BlockPoolsScope block_pools(GetVIXLAssembler());

  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
1421  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
1422
1423  if (field_info.IsVolatile()) {
1424    if (use_acquire_release) {
1425      // NB: LoadAcquire will record the pc info if needed.
1426      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
1427    } else {
1428      codegen_->Load(field_type, OutputCPURegister(instruction), field);
1429      codegen_->MaybeRecordImplicitNullCheck(instruction);
1430      // For IRIW (Independent Reads of Independent Writes) sequential consistency, kLoadAny is not sufficient.
1431      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1432    }
1433  } else {
1434    codegen_->Load(field_type, OutputCPURegister(instruction), field);
1435    codegen_->MaybeRecordImplicitNullCheck(instruction);
1436  }
1437
1438  if (field_type == Primitive::kPrimNot) {
1439    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
1440  }
1441}
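// Roughly, the volatile paths above reduce to either a single load-acquire
// instruction (an "ldar"-style load, exact opcode depending on the field
// type) or a plain load followed by "dmb ish"; this is a sketch, not the
// literal emitted sequence.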
1442
1443void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
1444  LocationSummary* locations =
1445      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1446  locations->SetInAt(0, Location::RequiresRegister());
1447  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
1448    locations->SetInAt(1, Location::RequiresFpuRegister());
1449  } else {
1450    locations->SetInAt(1, Location::RequiresRegister());
1451  }
1452}
1453
1454void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
1455                                                   const FieldInfo& field_info,
1456                                                   bool value_can_be_null) {
1457  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
1458  BlockPoolsScope block_pools(GetVIXLAssembler());
1459
1460  Register obj = InputRegisterAt(instruction, 0);
1461  CPURegister value = InputCPURegisterAt(instruction, 1);
1462  CPURegister source = value;
1463  Offset offset = field_info.GetFieldOffset();
1464  Primitive::Type field_type = field_info.GetFieldType();
1465  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
1466
1467  {
1468    // We use a block to end the scratch scope before the write barrier, thus
1469    // freeing the temporary registers so they can be used in `MarkGCCard`.
1470    UseScratchRegisterScope temps(GetVIXLAssembler());
1471
1472    if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) {
1473      DCHECK(value.IsW());
1474      Register temp = temps.AcquireW();
1475      __ Mov(temp, value.W());
1476      GetAssembler()->PoisonHeapReference(temp.W());
1477      source = temp;
1478    }
1479
1480    if (field_info.IsVolatile()) {
1481      if (use_acquire_release) {
1482        codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset));
1483        codegen_->MaybeRecordImplicitNullCheck(instruction);
1484      } else {
1485        GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
1486        codegen_->Store(field_type, source, HeapOperand(obj, offset));
1487        codegen_->MaybeRecordImplicitNullCheck(instruction);
1488        GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1489      }
1490    } else {
1491      codegen_->Store(field_type, source, HeapOperand(obj, offset));
1492      codegen_->MaybeRecordImplicitNullCheck(instruction);
1493    }
1494  }
1495
1496  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
1497    codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
1498  }
1499}
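// Roughly, a volatile store above is either a single store-release ("stlr"-
// style) instruction, or a plain store bracketed by barriers:
//   dmb ish    // kAnyStore maps to BarrierAll in GenerateMemoryBarrier
//   str ...
//   dmb ish    // kAnyAny
// (a sketch; the exact store opcode depends on the field type).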
1500
1501void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
1502  Primitive::Type type = instr->GetType();
1503
1504  switch (type) {
1505    case Primitive::kPrimInt:
1506    case Primitive::kPrimLong: {
1507      Register dst = OutputRegister(instr);
1508      Register lhs = InputRegisterAt(instr, 0);
1509      Operand rhs = InputOperandAt(instr, 1);
1510      if (instr->IsAdd()) {
1511        __ Add(dst, lhs, rhs);
1512      } else if (instr->IsAnd()) {
1513        __ And(dst, lhs, rhs);
1514      } else if (instr->IsOr()) {
1515        __ Orr(dst, lhs, rhs);
1516      } else if (instr->IsSub()) {
1517        __ Sub(dst, lhs, rhs);
1518      } else {
1519        DCHECK(instr->IsXor());
1520        __ Eor(dst, lhs, rhs);
1521      }
1522      break;
1523    }
1524    case Primitive::kPrimFloat:
1525    case Primitive::kPrimDouble: {
1526      FPRegister dst = OutputFPRegister(instr);
1527      FPRegister lhs = InputFPRegisterAt(instr, 0);
1528      FPRegister rhs = InputFPRegisterAt(instr, 1);
1529      if (instr->IsAdd()) {
1530        __ Fadd(dst, lhs, rhs);
1531      } else if (instr->IsSub()) {
1532        __ Fsub(dst, lhs, rhs);
1533      } else {
1534        LOG(FATAL) << "Unexpected floating-point binary operation";
1535      }
1536      break;
1537    }
1538    default:
1539      LOG(FATAL) << "Unexpected binary operation type " << type;
1540  }
1541}
1542
1543void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
1544  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1545
1546  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1547  Primitive::Type type = instr->GetResultType();
1548  switch (type) {
1549    case Primitive::kPrimInt:
1550    case Primitive::kPrimLong: {
1551      locations->SetInAt(0, Location::RequiresRegister());
1552      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1553      locations->SetOut(Location::RequiresRegister());
1554      break;
1555    }
1556    default:
1557      LOG(FATAL) << "Unexpected shift type " << type;
1558  }
1559}
1560
1561void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
1562  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1563
1564  Primitive::Type type = instr->GetType();
1565  switch (type) {
1566    case Primitive::kPrimInt:
1567    case Primitive::kPrimLong: {
1568      Register dst = OutputRegister(instr);
1569      Register lhs = InputRegisterAt(instr, 0);
1570      Operand rhs = InputOperandAt(instr, 1);
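      // The masking with kMaxIntShiftValue/kMaxLongShiftValue below mirrors
      // Java's use of only the low 5 (or 6) bits of the shift distance. The
      // register case needs no explicit masking: ARM64 variable-shift
      // instructions already take the distance modulo the register size.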
1571      if (rhs.IsImmediate()) {
1572        uint32_t shift_value = (type == Primitive::kPrimInt)
1573          ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
1574          : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
1575        if (instr->IsShl()) {
1576          __ Lsl(dst, lhs, shift_value);
1577        } else if (instr->IsShr()) {
1578          __ Asr(dst, lhs, shift_value);
1579        } else {
1580          __ Lsr(dst, lhs, shift_value);
1581        }
1582      } else {
1583        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1584
1585        if (instr->IsShl()) {
1586          __ Lsl(dst, lhs, rhs_reg);
1587        } else if (instr->IsShr()) {
1588          __ Asr(dst, lhs, rhs_reg);
1589        } else {
1590          __ Lsr(dst, lhs, rhs_reg);
1591        }
1592      }
1593      break;
1594    }
1595    default:
1596      LOG(FATAL) << "Unexpected shift operation type " << type;
1597  }
1598}
1599
1600void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1601  HandleBinaryOp(instruction);
1602}
1603
1604void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1605  HandleBinaryOp(instruction);
1606}
1607
1608void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1609  HandleBinaryOp(instruction);
1610}
1611
1612void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1613  HandleBinaryOp(instruction);
1614}
1615
1616void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
1617  LocationSummary* locations =
1618      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1619  locations->SetInAt(0, Location::RequiresRegister());
1620  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
1621  locations->SetOut(Location::RequiresRegister());
1622}
1623
1624void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
1625    HArm64IntermediateAddress* instruction) {
1626  __ Add(OutputRegister(instruction),
1627         InputRegisterAt(instruction, 0),
1628         Operand(InputOperandAt(instruction, 1)));
1629}
1630
1631void LocationsBuilderARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
1632  LocationSummary* locations =
1633      new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
1634  locations->SetInAt(HArm64MultiplyAccumulate::kInputAccumulatorIndex,
1635                     Location::RequiresRegister());
1636  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
1637  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
1638  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1639}
1640
1641void InstructionCodeGeneratorARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
1642  Register res = OutputRegister(instr);
1643  Register accumulator = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputAccumulatorIndex);
1644  Register mul_left = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulLeftIndex);
1645  Register mul_right = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulRightIndex);
1646
1647  // Avoid emitting code that could trigger Cortex A53's erratum 835769.
1648  // This fixup should be carried out for all multiply-accumulate instructions:
1649  // madd, msub, smaddl, smsubl, umaddl and umsubl.
1650  if (instr->GetType() == Primitive::kPrimLong &&
1651      codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) {
1652    MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler();
1653    vixl::Instruction* prev = masm->GetCursorAddress<vixl::Instruction*>() - vixl::kInstructionSize;
1654    if (prev->IsLoadOrStore()) {
1655      // Make sure we emit exactly one nop.
1656      vixl::CodeBufferCheckScope scope(masm,
1657                                       vixl::kInstructionSize,
1658                                       vixl::CodeBufferCheckScope::kCheck,
1659                                       vixl::CodeBufferCheckScope::kExactSize);
1660      __ nop();
1661    }
1662  }
1663
1664  if (instr->GetOpKind() == HInstruction::kAdd) {
1665    __ Madd(res, mul_left, mul_right, accumulator);
1666  } else {
1667    DCHECK(instr->GetOpKind() == HInstruction::kSub);
1668    __ Msub(res, mul_left, mul_right, accumulator);
1669  }
1670}
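// Context for the fixup above (a sketch, not a full statement of the
// erratum): on affected Cortex-A53 parts, a 64-bit multiply-accumulate that
// immediately follows a memory access, e.g.
//   ldr  x10, [x20]
//   madd x0, x1, x2, x3
// can produce a wrong result; the interposed nop breaks that sequence.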
1671
1672void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
1673  LocationSummary* locations =
1674      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1675  locations->SetInAt(0, Location::RequiresRegister());
1676  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1677  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1678    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1679  } else {
1680    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1681  }
1682}
1683
1684void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
1685  Primitive::Type type = instruction->GetType();
1686  Register obj = InputRegisterAt(instruction, 0);
1687  Location index = instruction->GetLocations()->InAt(1);
1688  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
1689  MemOperand source = HeapOperand(obj);
1690  CPURegister dest = OutputCPURegister(instruction);
1691
1692  MacroAssembler* masm = GetVIXLAssembler();
1693  UseScratchRegisterScope temps(masm);
1694  // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
1695  BlockPoolsScope block_pools(masm);
1696
1697  if (index.IsConstant()) {
1698    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
1699    source = HeapOperand(obj, offset);
1700  } else {
1701    Register temp = temps.AcquireSameSizeAs(obj);
1702    if (instruction->GetArray()->IsArm64IntermediateAddress()) {
1703      // We do not need to compute the intermediate address from the array: the
1704      // input instruction has done it already. See the comment in
1705      // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
1706      if (kIsDebugBuild) {
1707        HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
1708        DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
1709      }
1710      temp = obj;
1711    } else {
1712      __ Add(temp, obj, offset);
1713    }
1714    source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
1715  }
1716
1717  codegen_->Load(type, dest, source);
1718  codegen_->MaybeRecordImplicitNullCheck(instruction);
1719
1720  if (instruction->GetType() == Primitive::kPrimNot) {
1721    GetAssembler()->MaybeUnpoisonHeapReference(dest.W());
1722  }
1723}
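// With a register index, the load above uses a scaled addressing mode. For
// example, an int[] element read might emit something like (registers and
// offset illustrative):
//   add x16, <array>, #<data_offset>
//   ldr w0, [x16, x<index>, lsl #2]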
1724
1725void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
1726  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1727  locations->SetInAt(0, Location::RequiresRegister());
1728  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1729}
1730
1731void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
1732  BlockPoolsScope block_pools(GetVIXLAssembler());
1733  __ Ldr(OutputRegister(instruction),
1734         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
1735  codegen_->MaybeRecordImplicitNullCheck(instruction);
1736}
1737
1738void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
1739  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1740      instruction,
1741      instruction->NeedsTypeCheck() ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
1742  locations->SetInAt(0, Location::RequiresRegister());
1743  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1744  if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1745    locations->SetInAt(2, Location::RequiresFpuRegister());
1746  } else {
1747    locations->SetInAt(2, Location::RequiresRegister());
1748  }
1749}
1750
1751void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
1752  Primitive::Type value_type = instruction->GetComponentType();
1753  LocationSummary* locations = instruction->GetLocations();
1754  bool may_need_runtime_call = locations->CanCall();
1755  bool needs_write_barrier =
1756      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1757
1758  Register array = InputRegisterAt(instruction, 0);
1759  CPURegister value = InputCPURegisterAt(instruction, 2);
1760  CPURegister source = value;
1761  Location index = locations->InAt(1);
1762  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
1763  MemOperand destination = HeapOperand(array);
1764  MacroAssembler* masm = GetVIXLAssembler();
1765  BlockPoolsScope block_pools(masm);
1766
1767  if (!needs_write_barrier) {
1768    DCHECK(!may_need_runtime_call);
1769    if (index.IsConstant()) {
1770      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1771      destination = HeapOperand(array, offset);
1772    } else {
1773      UseScratchRegisterScope temps(masm);
1774      Register temp = temps.AcquireSameSizeAs(array);
1775      if (instruction->GetArray()->IsArm64IntermediateAddress()) {
1776        // We do not need to compute the intermediate address from the array: the
1777        // input instruction has done it already. See the comment in
1778        // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
1779        if (kIsDebugBuild) {
1780          HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
1781          DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
1782        }
1783        temp = array;
1784      } else {
1785        __ Add(temp, array, offset);
1786      }
1787      destination = HeapOperand(temp,
1788                                XRegisterFrom(index),
1789                                LSL,
1790                                Primitive::ComponentSizeShift(value_type));
1791    }
1792    codegen_->Store(value_type, value, destination);
1793    codegen_->MaybeRecordImplicitNullCheck(instruction);
1794  } else {
1795    DCHECK(needs_write_barrier);
1796    DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
1797    vixl::Label done;
1798    SlowPathCodeARM64* slow_path = nullptr;
1799    {
1800      // We use a block to end the scratch scope before the write barrier, thus
1801      // freeing the temporary registers so they can be used in `MarkGCCard`.
1802      UseScratchRegisterScope temps(masm);
1803      Register temp = temps.AcquireSameSizeAs(array);
1804      if (index.IsConstant()) {
1805        offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1806        destination = HeapOperand(array, offset);
1807      } else {
1808        destination = HeapOperand(temp,
1809                                  XRegisterFrom(index),
1810                                  LSL,
1811                                  Primitive::ComponentSizeShift(value_type));
1812      }
1813
1814      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
1815      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
1816      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
1817
1818      if (may_need_runtime_call) {
1819        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
1820        codegen_->AddSlowPath(slow_path);
1821        if (instruction->GetValueCanBeNull()) {
1822          vixl::Label non_zero;
1823          __ Cbnz(Register(value), &non_zero);
1824          if (!index.IsConstant()) {
1825            __ Add(temp, array, offset);
1826          }
1827          __ Str(wzr, destination);
1828          codegen_->MaybeRecordImplicitNullCheck(instruction);
1829          __ B(&done);
1830          __ Bind(&non_zero);
1831        }
1832
1833        Register temp2 = temps.AcquireSameSizeAs(array);
1834        __ Ldr(temp, HeapOperand(array, class_offset));
1835        codegen_->MaybeRecordImplicitNullCheck(instruction);
1836        GetAssembler()->MaybeUnpoisonHeapReference(temp);
1837        __ Ldr(temp, HeapOperand(temp, component_offset));
1838        __ Ldr(temp2, HeapOperand(Register(value), class_offset));
1839        // No need to poison/unpoison, we're comparing two poisoned references.
1840        __ Cmp(temp, temp2);
1841        if (instruction->StaticTypeOfArrayIsObjectArray()) {
1842          vixl::Label do_put;
1843          __ B(eq, &do_put);
1844          GetAssembler()->MaybeUnpoisonHeapReference(temp);
1845          __ Ldr(temp, HeapOperand(temp, super_offset));
1846          // No need to unpoison, we're comparing against null.
1847          __ Cbnz(temp, slow_path->GetEntryLabel());
1848          __ Bind(&do_put);
1849        } else {
1850          __ B(ne, slow_path->GetEntryLabel());
1851        }
1852        temps.Release(temp2);
1853      }
1854
1855      if (kPoisonHeapReferences) {
1856        Register temp2 = temps.AcquireSameSizeAs(array);
1857        DCHECK(value.IsW());
1858        __ Mov(temp2, value.W());
1859        GetAssembler()->PoisonHeapReference(temp2);
1860        source = temp2;
1861      }
1862
1863      if (!index.IsConstant()) {
1864        __ Add(temp, array, offset);
1865      }
1866      __ Str(source, destination);
1867
1868      if (!may_need_runtime_call) {
1869        codegen_->MaybeRecordImplicitNullCheck(instruction);
1870      }
1871    }
1872
1873    codegen_->MarkGCCard(array, value.W(), instruction->GetValueCanBeNull());
1874
1875    if (done.IsLinked()) {
1876      __ Bind(&done);
1877    }
1878
1879    if (slow_path != nullptr) {
1880      __ Bind(slow_path->GetExitLabel());
1881    }
1882  }
1883}
1884
1885void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1886  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
1887      ? LocationSummary::kCallOnSlowPath
1888      : LocationSummary::kNoCall;
1889  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
1890  locations->SetInAt(0, Location::RequiresRegister());
1891  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1892  if (instruction->HasUses()) {
1893    locations->SetOut(Location::SameAsFirstInput());
1894  }
1895}
1896
1897void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1898  BoundsCheckSlowPathARM64* slow_path =
1899      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
1900  codegen_->AddSlowPath(slow_path);
1901
1902  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
1903  __ B(slow_path->GetEntryLabel(), hs);
1904}
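// Note the unsigned "hs" condition: a negative index compares as a huge
// unsigned value, so both index < 0 and index >= length reach the slow path
// with a single branch.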
1905
1906void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
1907  LocationSummary* locations =
1908      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1909  locations->SetInAt(0, Location::RequiresRegister());
1910  if (check->HasUses()) {
1911    locations->SetOut(Location::SameAsFirstInput());
1912  }
1913}
1914
1915void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
1916  // We assume the class is not null.
1917  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
1918      check->GetLoadClass(), check, check->GetDexPc(), true);
1919  codegen_->AddSlowPath(slow_path);
1920  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
1921}
1922
1923static bool IsFloatingPointZeroConstant(HInstruction* instruction) {
1924  return (instruction->IsFloatConstant() && (instruction->AsFloatConstant()->GetValue() == 0.0f))
1925      || (instruction->IsDoubleConstant() && (instruction->AsDoubleConstant()->GetValue() == 0.0));
1926}
1927
1928void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
1929  LocationSummary* locations =
1930      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
1931  Primitive::Type in_type = compare->InputAt(0)->GetType();
1932  switch (in_type) {
1933    case Primitive::kPrimLong: {
1934      locations->SetInAt(0, Location::RequiresRegister());
1935      locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
1936      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1937      break;
1938    }
1939    case Primitive::kPrimFloat:
1940    case Primitive::kPrimDouble: {
1941      locations->SetInAt(0, Location::RequiresFpuRegister());
1942      locations->SetInAt(1,
1943                         IsFloatingPointZeroConstant(compare->InputAt(1))
1944                             ? Location::ConstantLocation(compare->InputAt(1)->AsConstant())
1945                             : Location::RequiresFpuRegister());
1946      locations->SetOut(Location::RequiresRegister());
1947      break;
1948    }
1949    default:
1950      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1951  }
1952}
1953
1954void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
1955  Primitive::Type in_type = compare->InputAt(0)->GetType();
1956
1957  //  0 if: left == right
1958  //  1 if: left  > right
1959  // -1 if: left  < right
1960  switch (in_type) {
1961    case Primitive::kPrimLong: {
1962      Register result = OutputRegister(compare);
1963      Register left = InputRegisterAt(compare, 0);
1964      Operand right = InputOperandAt(compare, 1);
1965
1966      __ Cmp(left, right);
1967      __ Cset(result, ne);
1968      __ Cneg(result, result, lt);
1969      break;
1970    }
1971    case Primitive::kPrimFloat:
1972    case Primitive::kPrimDouble: {
1973      Register result = OutputRegister(compare);
1974      FPRegister left = InputFPRegisterAt(compare, 0);
1975      if (compare->GetLocations()->InAt(1).IsConstant()) {
1976        DCHECK(IsFloatingPointZeroConstant(compare->GetLocations()->InAt(1).GetConstant()));
1977        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
1978        __ Fcmp(left, 0.0);
1979      } else {
1980        __ Fcmp(left, InputFPRegisterAt(compare, 1));
1981      }
1982      if (compare->IsGtBias()) {
1983        __ Cset(result, ne);
1984      } else {
1985        __ Csetm(result, ne);
1986      }
1987      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
1988      break;
1989    }
1990    default:
1991      LOG(FATAL) << "Unimplemented compare type " << in_type;
1992  }
1993}
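// A worked example for the integral path (registers illustrative):
//   cmp  x0, x1
//   cset w2, ne        // w2 = (left != right) ? 1 : 0
//   cneg w2, w2, lt    // w2 = (left < right) ? -w2 : w2
// which yields the -1/0/1 result described above.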
1994
1995void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
1996  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1997
1998  if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
1999    locations->SetInAt(0, Location::RequiresFpuRegister());
2000    locations->SetInAt(1,
2001                       IsFloatingPointZeroConstant(instruction->InputAt(1))
2002                           ? Location::ConstantLocation(instruction->InputAt(1)->AsConstant())
2003                           : Location::RequiresFpuRegister());
2004  } else {
2005    // Integer cases.
2006    locations->SetInAt(0, Location::RequiresRegister());
2007    locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
2008  }
2009
2010  if (instruction->NeedsMaterialization()) {
2011    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2012  }
2013}
2014
2015void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
2016  if (!instruction->NeedsMaterialization()) {
2017    return;
2018  }
2019
2020  LocationSummary* locations = instruction->GetLocations();
2021  Register res = RegisterFrom(locations->Out(), instruction->GetType());
2022  IfCondition if_cond = instruction->GetCondition();
2023  Condition arm64_cond = ARM64Condition(if_cond);
2024
2025  if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
2026    FPRegister lhs = InputFPRegisterAt(instruction, 0);
2027    if (locations->InAt(1).IsConstant()) {
2028      DCHECK(IsFloatingPointZeroConstant(locations->InAt(1).GetConstant()));
2029      // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
2030      __ Fcmp(lhs, 0.0);
2031    } else {
2032      __ Fcmp(lhs, InputFPRegisterAt(instruction, 1));
2033    }
2034    __ Cset(res, arm64_cond);
2035    if (instruction->IsFPConditionTrueIfNaN()) {
2036      // res = IsUnordered(arm64_cond) ? 1 : res  <=>  res = IsNotUnordered(arm64_cond) ? res : 1
2037      __ Csel(res, res, Operand(1), vc);  // VC for "not unordered".
2038    } else if (instruction->IsFPConditionFalseIfNaN()) {
2039      // res = IsUnordered(arm64_cond) ? 0 : res  <=>  res = IsNotUnordered(arm64_cond) ? res : 0
2040      __ Csel(res, res, Operand(0), vc);  // VC for "not unordered".
2041    }
2042  } else {
2043    // Integer cases.
2044    Register lhs = InputRegisterAt(instruction, 0);
2045    Operand rhs = InputOperandAt(instruction, 1);
2046    __ Cmp(lhs, rhs);
2047    __ Cset(res, arm64_cond);
2048  }
2049}
2050
2051#define FOR_EACH_CONDITION_INSTRUCTION(M)                                                \
2052  M(Equal)                                                                               \
2053  M(NotEqual)                                                                            \
2054  M(LessThan)                                                                            \
2055  M(LessThanOrEqual)                                                                     \
2056  M(GreaterThan)                                                                         \
2057  M(GreaterThanOrEqual)                                                                  \
2058  M(Below)                                                                               \
2059  M(BelowOrEqual)                                                                        \
2060  M(Above)                                                                               \
2061  M(AboveOrEqual)
2062#define DEFINE_CONDITION_VISITORS(Name)                                                  \
2063void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
2064void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
2065FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
2066#undef DEFINE_CONDITION_VISITORS
2067#undef FOR_EACH_CONDITION_INSTRUCTION
2068
2069void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2070  DCHECK(instruction->IsDiv() || instruction->IsRem());
2071
2072  LocationSummary* locations = instruction->GetLocations();
2073  Location second = locations->InAt(1);
2074  DCHECK(second.IsConstant());
2075
2076  Register out = OutputRegister(instruction);
2077  Register dividend = InputRegisterAt(instruction, 0);
2078  int64_t imm = Int64FromConstant(second.GetConstant());
2079  DCHECK(imm == 1 || imm == -1);
2080
2081  if (instruction->IsRem()) {
2082    __ Mov(out, 0);
2083  } else {
2084    if (imm == 1) {
2085      __ Mov(out, dividend);
2086    } else {
2087      __ Neg(out, dividend);
2088    }
2089  }
2090}
2091
2092void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2093  DCHECK(instruction->IsDiv() || instruction->IsRem());
2094
2095  LocationSummary* locations = instruction->GetLocations();
2096  Location second = locations->InAt(1);
2097  DCHECK(second.IsConstant());
2098
2099  Register out = OutputRegister(instruction);
2100  Register dividend = InputRegisterAt(instruction, 0);
2101  int64_t imm = Int64FromConstant(second.GetConstant());
2102  uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
2103  DCHECK(IsPowerOfTwo(abs_imm));
2104  int ctz_imm = CTZ(abs_imm);
2105
2106  UseScratchRegisterScope temps(GetVIXLAssembler());
2107  Register temp = temps.AcquireSameSizeAs(out);
2108
2109  if (instruction->IsDiv()) {
2110    __ Add(temp, dividend, abs_imm - 1);
2111    __ Cmp(dividend, 0);
2112    __ Csel(out, temp, dividend, lt);
2113    if (imm > 0) {
2114      __ Asr(out, out, ctz_imm);
2115    } else {
2116      __ Neg(out, Operand(out, ASR, ctz_imm));
2117    }
2118  } else {
2119    int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64;
2120    __ Asr(temp, dividend, bits - 1);
2121    __ Lsr(temp, temp, bits - ctz_imm);
2122    __ Add(out, dividend, temp);
2123    __ And(out, out, abs_imm - 1);
2124    __ Sub(out, out, temp);
2125  }
2126}
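// A worked example, assuming a divisor of 8 (ctz_imm == 3): the Div path
// computes
//   out = (dividend < 0 ? dividend + 7 : dividend) >> 3
// via the Add/Cmp/Csel/Asr sequence, i.e. a round-towards-zero signed
// division without an sdiv instruction.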
2127
2128void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
2129  DCHECK(instruction->IsDiv() || instruction->IsRem());
2130
2131  LocationSummary* locations = instruction->GetLocations();
2132  Location second = locations->InAt(1);
2133  DCHECK(second.IsConstant());
2134
2135  Register out = OutputRegister(instruction);
2136  Register dividend = InputRegisterAt(instruction, 0);
2137  int64_t imm = Int64FromConstant(second.GetConstant());
2138
2139  Primitive::Type type = instruction->GetResultType();
2140  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
2141
2142  int64_t magic;
2143  int shift;
2144  CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift);
2145
2146  UseScratchRegisterScope temps(GetVIXLAssembler());
2147  Register temp = temps.AcquireSameSizeAs(out);
2148
2149  // temp = get_high(dividend * magic)
2150  __ Mov(temp, magic);
2151  if (type == Primitive::kPrimLong) {
2152    __ Smulh(temp, dividend, temp);
2153  } else {
2154    __ Smull(temp.X(), dividend, temp);
2155    __ Lsr(temp.X(), temp.X(), 32);
2156  }
2157
2158  if (imm > 0 && magic < 0) {
2159    __ Add(temp, temp, dividend);
2160  } else if (imm < 0 && magic > 0) {
2161    __ Sub(temp, temp, dividend);
2162  }
2163
2164  if (shift != 0) {
2165    __ Asr(temp, temp, shift);
2166  }
2167
2168  if (instruction->IsDiv()) {
2169    __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
2170  } else {
2171    __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
2172    // TODO: Strength reduction for msub.
2173    Register temp_imm = temps.AcquireSameSizeAs(out);
2174    __ Mov(temp_imm, imm);
2175    __ Msub(out, temp, temp_imm, dividend);
2176  }
2177}
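// A worked example, assuming a 32-bit division by 7 with the classic
// Hacker's Delight constants (magic = 0x92492493, shift = 2): since
// imm > 0 and magic < 0, the code computes
//   temp = hi32(dividend * magic) + dividend
// shifts it right by 2, and applies the sign fixup temp - (temp >> 31);
// the Rem path then folds the quotient back via a single msub.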
2178
2179void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
2180  DCHECK(instruction->IsDiv() || instruction->IsRem());
2181  Primitive::Type type = instruction->GetResultType();
2182  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
2183
2184  LocationSummary* locations = instruction->GetLocations();
2185  Register out = OutputRegister(instruction);
2186  Location second = locations->InAt(1);
2187
2188  if (second.IsConstant()) {
2189    int64_t imm = Int64FromConstant(second.GetConstant());
2190
2191    if (imm == 0) {
2192      // Do not generate anything. DivZeroCheck would prevent any code from being executed.
2193    } else if (imm == 1 || imm == -1) {
2194      DivRemOneOrMinusOne(instruction);
2195    } else if (IsPowerOfTwo(std::abs(imm))) {
2196      DivRemByPowerOfTwo(instruction);
2197    } else {
2198      DCHECK(imm <= -2 || imm >= 2);
2199      GenerateDivRemWithAnyConstant(instruction);
2200    }
2201  } else {
2202    Register dividend = InputRegisterAt(instruction, 0);
2203    Register divisor = InputRegisterAt(instruction, 1);
2204    if (instruction->IsDiv()) {
2205      __ Sdiv(out, dividend, divisor);
2206    } else {
2207      UseScratchRegisterScope temps(GetVIXLAssembler());
2208      Register temp = temps.AcquireSameSizeAs(out);
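      // rem = dividend - (dividend / divisor) * divisor, folded into one msub.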
2209      __ Sdiv(temp, dividend, divisor);
2210      __ Msub(out, temp, divisor, dividend);
2211    }
2212  }
2213}
2214
2215void LocationsBuilderARM64::VisitDiv(HDiv* div) {
2216  LocationSummary* locations =
2217      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
2218  switch (div->GetResultType()) {
2219    case Primitive::kPrimInt:
2220    case Primitive::kPrimLong:
2221      locations->SetInAt(0, Location::RequiresRegister());
2222      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2223      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2224      break;
2225
2226    case Primitive::kPrimFloat:
2227    case Primitive::kPrimDouble:
2228      locations->SetInAt(0, Location::RequiresFpuRegister());
2229      locations->SetInAt(1, Location::RequiresFpuRegister());
2230      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2231      break;
2232
2233    default:
2234      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2235  }
2236}
2237
2238void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
2239  Primitive::Type type = div->GetResultType();
2240  switch (type) {
2241    case Primitive::kPrimInt:
2242    case Primitive::kPrimLong:
2243      GenerateDivRemIntegral(div);
2244      break;
2245
2246    case Primitive::kPrimFloat:
2247    case Primitive::kPrimDouble:
2248      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
2249      break;
2250
2251    default:
2252      LOG(FATAL) << "Unexpected div type " << type;
2253  }
2254}
2255
2256void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2257  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2258      ? LocationSummary::kCallOnSlowPath
2259      : LocationSummary::kNoCall;
2260  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2261  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2262  if (instruction->HasUses()) {
2263    locations->SetOut(Location::SameAsFirstInput());
2264  }
2265}
2266
2267void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2268  SlowPathCodeARM64* slow_path =
2269      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
2270  codegen_->AddSlowPath(slow_path);
2271  Location value = instruction->GetLocations()->InAt(0);
2272
2273  Primitive::Type type = instruction->GetType();
2274
2275  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
2276    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
2277    return;
2278  }
2279
2280  if (value.IsConstant()) {
2281    int64_t divisor = Int64ConstantFrom(value);
2282    if (divisor == 0) {
2283      __ B(slow_path->GetEntryLabel());
2284    } else {
2285      // A division by a non-zero constant is valid. We don't need to perform
2286      // any check, so simply fall through.
2287    }
2288  } else {
2289    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
2290  }
2291}
2292
2293void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
2294  LocationSummary* locations =
2295      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2296  locations->SetOut(Location::ConstantLocation(constant));
2297}
2298
2299void InstructionCodeGeneratorARM64::VisitDoubleConstant(
2300    HDoubleConstant* constant ATTRIBUTE_UNUSED) {
2301  // Will be generated at use site.
2302}
2303
2304void LocationsBuilderARM64::VisitExit(HExit* exit) {
2305  exit->SetLocations(nullptr);
2306}
2307
2308void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
2309}
2310
2311void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
2312  LocationSummary* locations =
2313      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2314  locations->SetOut(Location::ConstantLocation(constant));
2315}
2316
2317void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
2318  // Will be generated at use site.
2319}
2320
2321void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
2322  DCHECK(!successor->IsExitBlock());
2323  HBasicBlock* block = got->GetBlock();
2324  HInstruction* previous = got->GetPrevious();
2325  HLoopInformation* info = block->GetLoopInformation();
2326
2327  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
2328    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
2329    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
2330    return;
2331  }
2332  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
2333    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
2334  }
2335  if (!codegen_->GoesToNextBlock(block, successor)) {
2336    __ B(codegen_->GetLabelOf(successor));
2337  }
2338}
2339
2340void LocationsBuilderARM64::VisitGoto(HGoto* got) {
2341  got->SetLocations(nullptr);
2342}
2343
2344void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
2345  HandleGoto(got, got->GetSuccessor());
2346}
2347
2348void LocationsBuilderARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2349  try_boundary->SetLocations(nullptr);
2350}
2351
2352void InstructionCodeGeneratorARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2353  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
2354  if (!successor->IsExitBlock()) {
2355    HandleGoto(try_boundary, successor);
2356  }
2357}
2358
2359void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
2360                                                          size_t condition_input_index,
2361                                                          vixl::Label* true_target,
2362                                                          vixl::Label* false_target) {
2363  // FP branching requires both targets to be explicit. If either of the targets
2364  // is nullptr (fallthrough), use and bind `fallthrough_target` instead.
2365  vixl::Label fallthrough_target;
2366  HInstruction* cond = instruction->InputAt(condition_input_index);
2367
2368  if (true_target == nullptr && false_target == nullptr) {
2369    // Nothing to do. The code always falls through.
2370    return;
2371  } else if (cond->IsIntConstant()) {
2372    // Constant condition, statically compared against 1.
2373    if (cond->AsIntConstant()->IsOne()) {
2374      if (true_target != nullptr) {
2375        __ B(true_target);
2376      }
2377    } else {
2378      DCHECK(cond->AsIntConstant()->IsZero());
2379      if (false_target != nullptr) {
2380        __ B(false_target);
2381      }
2382    }
2383    return;
2384  }
2385
2386  // The following code generates these patterns:
2387  //  (1) true_target == nullptr && false_target != nullptr
2388  //        - opposite condition true => branch to false_target
2389  //  (2) true_target != nullptr && false_target == nullptr
2390  //        - condition true => branch to true_target
2391  //  (3) true_target != nullptr && false_target != nullptr
2392  //        - condition true => branch to true_target
2393  //        - branch to false_target
2394  if (IsBooleanValueOrMaterializedCondition(cond)) {
2395    // The condition instruction has been materialized, compare the output to 0.
2396    Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
2397    DCHECK(cond_val.IsRegister());
2398    if (true_target == nullptr) {
2399      __ Cbz(InputRegisterAt(instruction, condition_input_index), false_target);
2400    } else {
2401      __ Cbnz(InputRegisterAt(instruction, condition_input_index), true_target);
2402    }
2403  } else {
2404    // The condition instruction has not been materialized, use its inputs as
2405    // the comparison and its condition as the branch condition.
2406    HCondition* condition = cond->AsCondition();
2407
2408    Primitive::Type type = condition->InputAt(0)->GetType();
2409    if (Primitive::IsFloatingPointType(type)) {
2410      FPRegister lhs = InputFPRegisterAt(condition, 0);
2411      if (condition->GetLocations()->InAt(1).IsConstant()) {
2412        DCHECK(IsFloatingPointZeroConstant(condition->GetLocations()->InAt(1).GetConstant()));
2413        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
2414        __ Fcmp(lhs, 0.0);
2415      } else {
2416        __ Fcmp(lhs, InputFPRegisterAt(condition, 1));
2417      }
2418      if (condition->IsFPConditionTrueIfNaN()) {
2419        __ B(vs, true_target == nullptr ? &fallthrough_target : true_target);
2420      } else if (condition->IsFPConditionFalseIfNaN()) {
2421        __ B(vs, false_target == nullptr ? &fallthrough_target : false_target);
2422      }
2423      if (true_target == nullptr) {
2424        __ B(ARM64Condition(condition->GetOppositeCondition()), false_target);
2425      } else {
2426        __ B(ARM64Condition(condition->GetCondition()), true_target);
2427      }
2428    } else {
2429      // Integer cases.
2430      Register lhs = InputRegisterAt(condition, 0);
2431      Operand rhs = InputOperandAt(condition, 1);
2432
2433      Condition arm64_cond;
2434      vixl::Label* non_fallthrough_target;
2435      if (true_target == nullptr) {
2436        arm64_cond = ARM64Condition(condition->GetOppositeCondition());
2437        non_fallthrough_target = false_target;
2438      } else {
2439        arm64_cond = ARM64Condition(condition->GetCondition());
2440        non_fallthrough_target = true_target;
2441      }
2442
2443      if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
2444        switch (arm64_cond) {
2445          case eq:
2446            __ Cbz(lhs, non_fallthrough_target);
2447            break;
2448          case ne:
2449            __ Cbnz(lhs, non_fallthrough_target);
2450            break;
2451          case lt:
2452            // Test the sign bit and branch accordingly.
2453            __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, non_fallthrough_target);
2454            break;
2455          case ge:
2456            // Test the sign bit and branch accordingly.
2457            __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, non_fallthrough_target);
2458            break;
2459          default:
2460            // Without the `static_cast`, the compiler emits an error under
2461            // `-Werror=sign-promo`.
2462            LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond);
2463        }
2464      } else {
2465        __ Cmp(lhs, rhs);
2466        __ B(arm64_cond, non_fallthrough_target);
2467      }
2468    }
2469  }
2470
2471  // If neither branch falls through (case 3), the conditional branch to `true_target`
2472  // was already emitted (case 2) and we need to emit a jump to `false_target`.
2473  if (true_target != nullptr && false_target != nullptr) {
2474    __ B(false_target);
2475  }
2476
2477  if (fallthrough_target.IsLinked()) {
2478    __ Bind(&fallthrough_target);
2479  }
2480}
2481
2482void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
2483  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2484  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
2485    locations->SetInAt(0, Location::RequiresRegister());
2486  }
2487}
2488
2489void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
2490  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
2491  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
2492  vixl::Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
2493      nullptr : codegen_->GetLabelOf(true_successor);
2494  vixl::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
2495      nullptr : codegen_->GetLabelOf(false_successor);
2496  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
2497}
2498
2499void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2500  LocationSummary* locations = new (GetGraph()->GetArena())
2501      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2502  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
2503    locations->SetInAt(0, Location::RequiresRegister());
2504  }
2505}
2506
2507void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2508  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
2509      DeoptimizationSlowPathARM64(deoptimize);
2510  codegen_->AddSlowPath(slow_path);
2511  GenerateTestAndBranch(deoptimize,
2512                        /* condition_input_index */ 0,
2513                        slow_path->GetEntryLabel(),
2514                        /* false_target */ nullptr);
2515}
2516
2517void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2518  HandleFieldGet(instruction);
2519}
2520
2521void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2522  HandleFieldGet(instruction, instruction->GetFieldInfo());
2523}
2524
2525void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2526  HandleFieldSet(instruction);
2527}
2528
2529void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2530  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
2531}
2532
2533void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
2534  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2535  switch (instruction->GetTypeCheckKind()) {
2536    case TypeCheckKind::kExactCheck:
2537    case TypeCheckKind::kAbstractClassCheck:
2538    case TypeCheckKind::kClassHierarchyCheck:
2539    case TypeCheckKind::kArrayObjectCheck:
2540      call_kind = LocationSummary::kNoCall;
2541      break;
2542    case TypeCheckKind::kUnresolvedCheck:
2543    case TypeCheckKind::kInterfaceCheck:
2544      call_kind = LocationSummary::kCall;
2545      break;
2546    case TypeCheckKind::kArrayCheck:
2547      call_kind = LocationSummary::kCallOnSlowPath;
2548      break;
2549  }
2550  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2551  if (call_kind != LocationSummary::kCall) {
2552    locations->SetInAt(0, Location::RequiresRegister());
2553    locations->SetInAt(1, Location::RequiresRegister());
2554    // The out register is used as a temporary, so it overlaps with the inputs.
2555    // Note that TypeCheckSlowPathARM64 uses this register too.
2556    locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2557  } else {
2558    InvokeRuntimeCallingConvention calling_convention;
2559    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0)));
2560    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2561    locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
2562  }
2563}
2564
2565void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
2566  LocationSummary* locations = instruction->GetLocations();
2567  Register obj = InputRegisterAt(instruction, 0);
2568  Register cls = InputRegisterAt(instruction, 1);
2569  Register out = OutputRegister(instruction);
2570  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2571  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2572  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2573  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
2574
2575  vixl::Label done, zero;
2576  SlowPathCodeARM64* slow_path = nullptr;
2577
2578  // Return 0 if `obj` is null.
2579  // Avoid null check if we know `obj` is not null.
2580  if (instruction->MustDoNullCheck()) {
2581    __ Cbz(obj, &zero);
2582  }
2583
2584  // In case of an interface/unresolved check, we put the object class into the object register.
2585  // This is safe, as the register is caller-save, and the object must be in another
2586  // register if it survives the runtime call.
2587  Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
2588      (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
2589      ? obj
2590      : out;
2591  __ Ldr(target, HeapOperand(obj.W(), class_offset));
2592  GetAssembler()->MaybeUnpoisonHeapReference(target);
2593
2594  switch (instruction->GetTypeCheckKind()) {
2595    case TypeCheckKind::kExactCheck: {
2596      __ Cmp(out, cls);
2597      __ Cset(out, eq);
2598      if (zero.IsLinked()) {
2599        __ B(&done);
2600      }
2601      break;
2602    }
2603    case TypeCheckKind::kAbstractClassCheck: {
2604      // If the class is abstract, we eagerly fetch the super class of the
2605      // object to avoid doing a comparison we know will fail.
2606      vixl::Label loop, success;
2607      __ Bind(&loop);
2608      __ Ldr(out, HeapOperand(out, super_offset));
2609      GetAssembler()->MaybeUnpoisonHeapReference(out);
2610      // If `out` is null, we use it for the result, and jump to `done`.
2611      __ Cbz(out, &done);
2612      __ Cmp(out, cls);
2613      __ B(ne, &loop);
2614      __ Mov(out, 1);
2615      if (zero.IsLinked()) {
2616        __ B(&done);
2617      }
2618      break;
2619    }
2620    case TypeCheckKind::kClassHierarchyCheck: {
2621      // Walk over the class hierarchy to find a match.
2622      vixl::Label loop, success;
2623      __ Bind(&loop);
2624      __ Cmp(out, cls);
2625      __ B(eq, &success);
2626      __ Ldr(out, HeapOperand(out, super_offset));
2627      GetAssembler()->MaybeUnpoisonHeapReference(out);
2628      __ Cbnz(out, &loop);
2629      // If `out` is null, we use it for the result, and jump to `done`.
2630      __ B(&done);
2631      __ Bind(&success);
2632      __ Mov(out, 1);
2633      if (zero.IsLinked()) {
2634        __ B(&done);
2635      }
2636      break;
2637    }
2638    case TypeCheckKind::kArrayObjectCheck: {
2639      // Do an exact check.
2640      vixl::Label exact_check;
2641      __ Cmp(out, cls);
2642      __ B(eq, &exact_check);
2643      // Otherwise, we need to check that the object's class is a non-primitive array.
2644      __ Ldr(out, HeapOperand(out, component_offset));
2645      GetAssembler()->MaybeUnpoisonHeapReference(out);
2646      // If `out` is null, we use it for the result, and jump to `done`.
2647      __ Cbz(out, &done);
2648      __ Ldrh(out, HeapOperand(out, primitive_offset));
2649      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
2650      __ Cbnz(out, &zero);
2651      __ Bind(&exact_check);
2652      __ Mov(out, 1);
2653      __ B(&done);
2654      break;
2655    }
2656    case TypeCheckKind::kArrayCheck: {
2657      __ Cmp(out, cls);
2658      DCHECK(locations->OnlyCallsOnSlowPath());
2659      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
2660          instruction, /* is_fatal */ false);
2661      codegen_->AddSlowPath(slow_path);
2662      __ B(ne, slow_path->GetEntryLabel());
2663      __ Mov(out, 1);
2664      if (zero.IsLinked()) {
2665        __ B(&done);
2666      }
2667      break;
2668    }
2669    case TypeCheckKind::kUnresolvedCheck:
2670    case TypeCheckKind::kInterfaceCheck:
2671    default: {
2672      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
2673                              instruction,
2674                              instruction->GetDexPc(),
2675                              nullptr);
2676      if (zero.IsLinked()) {
2677        __ B(&done);
2678      }
2679      break;
2680    }
2681  }
2682
2683  if (zero.IsLinked()) {
2684    __ Bind(&zero);
2685    __ Mov(out, 0);
2686  }
2687
2688  if (done.IsLinked()) {
2689    __ Bind(&done);
2690  }
2691
2692  if (slow_path != nullptr) {
2693    __ Bind(slow_path->GetExitLabel());
2694  }
2695}
2696
2697void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
2698  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2699  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
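      // Checks that can throw into a catch block need a slow path (so the environment is
      // recorded); unresolved and interface checks always call into the runtime.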
2700
2701  switch (instruction->GetTypeCheckKind()) {
2702    case TypeCheckKind::kExactCheck:
2703    case TypeCheckKind::kAbstractClassCheck:
2704    case TypeCheckKind::kClassHierarchyCheck:
2705    case TypeCheckKind::kArrayObjectCheck:
2706      call_kind = throws_into_catch
2707          ? LocationSummary::kCallOnSlowPath
2708          : LocationSummary::kNoCall;
2709      break;
2710    case TypeCheckKind::kUnresolvedCheck:
2711    case TypeCheckKind::kInterfaceCheck:
2712      call_kind = LocationSummary::kCall;
2713      break;
2714    case TypeCheckKind::kArrayCheck:
2715      call_kind = LocationSummary::kCallOnSlowPath;
2716      break;
2717  }
2718
2719  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
2720      instruction, call_kind);
2721  if (call_kind != LocationSummary::kCall) {
2722    locations->SetInAt(0, Location::RequiresRegister());
2723    locations->SetInAt(1, Location::RequiresRegister());
2724    // Note that TypeCheckSlowPathARM64 uses this register too.
2725    locations->AddTemp(Location::RequiresRegister());
2726  } else {
2727    InvokeRuntimeCallingConvention calling_convention;
2728    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0)));
2729    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
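        // Note the swapped indices: the check-cast entrypoint expects the class in the
        // first argument register and the object in the second.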
2730  }
2731}
2732
2733void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
2734  LocationSummary* locations = instruction->GetLocations();
2735  Register obj = InputRegisterAt(instruction, 0);
2736  Register cls = InputRegisterAt(instruction, 1);
2737  Register temp;
2738  if (!locations->WillCall()) {
2739    temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
2740  }
2741
2742  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2743  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2744  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2745  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
2746  SlowPathCodeARM64* slow_path = nullptr;
2747
2748  if (!locations->WillCall()) {
2749    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
2750        instruction, !locations->CanCall());
2751    codegen_->AddSlowPath(slow_path);
2752  }
2753
2754  vixl::Label done;
2755  // Avoid null check if we know `obj` is not null.
2756  if (instruction->MustDoNullCheck()) {
2757    __ Cbz(obj, &done);
2758  }
2759
2760  if (locations->WillCall()) {
2761    __ Ldr(obj, HeapOperand(obj, class_offset));
2762    GetAssembler()->MaybeUnpoisonHeapReference(obj);
2763  } else {
2764    __ Ldr(temp, HeapOperand(obj, class_offset));
2765    GetAssembler()->MaybeUnpoisonHeapReference(temp);
2766  }
2767
2768  switch (instruction->GetTypeCheckKind()) {
2769    case TypeCheckKind::kExactCheck:
2770    case TypeCheckKind::kArrayCheck: {
2771      __ Cmp(temp, cls);
2772      // Jump to slow path for throwing the exception or doing a
2773      // more involved array check.
2774      __ B(ne, slow_path->GetEntryLabel());
2775      break;
2776    }
2777    case TypeCheckKind::kAbstractClassCheck: {
2778      // If the class is abstract, we eagerly fetch the super class of the
2779      // object to avoid doing a comparison we know will fail.
2780      vixl::Label loop;
2781      __ Bind(&loop);
2782      __ Ldr(temp, HeapOperand(temp, super_offset));
2783      GetAssembler()->MaybeUnpoisonHeapReference(temp);
2784      // Jump to the slow path to throw the exception.
2785      __ Cbz(temp, slow_path->GetEntryLabel());
2786      __ Cmp(temp, cls);
2787      __ B(ne, &loop);
2788      break;
2789    }
2790    case TypeCheckKind::kClassHierarchyCheck: {
2791      // Walk over the class hierarchy to find a match.
2792      vixl::Label loop;
2793      __ Bind(&loop);
2794      __ Cmp(temp, cls);
2795      __ B(eq, &done);
2796      __ Ldr(temp, HeapOperand(temp, super_offset));
2797      GetAssembler()->MaybeUnpoisonHeapReference(temp);
2798      __ Cbnz(temp, &loop);
2799      // Jump to the slow path to throw the exception.
2800      __ B(slow_path->GetEntryLabel());
2801      break;
2802    }
2803    case TypeCheckKind::kArrayObjectCheck: {
2804      // Do an exact check.
2805      __ Cmp(temp, cls);
2806      __ B(eq, &done);
2807      // Otherwise, we need to check that the object's class is a non-primitive array.
2808      __ Ldr(temp, HeapOperand(temp, component_offset));
2809      GetAssembler()->MaybeUnpoisonHeapReference(temp);
2810      __ Cbz(temp, slow_path->GetEntryLabel());
2811      __ Ldrh(temp, HeapOperand(temp, primitive_offset));
2812      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
2813      __ Cbnz(temp, slow_path->GetEntryLabel());
2814      break;
2815    }
2816    case TypeCheckKind::kUnresolvedCheck:
2817    case TypeCheckKind::kInterfaceCheck:
2818    default:
2819      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
2820                              instruction,
2821                              instruction->GetDexPc(),
2822                              nullptr);
2823      break;
2824  }
2825  __ Bind(&done);
2826
2827  if (slow_path != nullptr) {
2828    __ Bind(slow_path->GetExitLabel());
2829  }
2830}
2831
2832void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
2833  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2834  locations->SetOut(Location::ConstantLocation(constant));
2835}
2836
2837void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2838  // Will be generated at use site.
2839}
2840
2841void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
2842  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2843  locations->SetOut(Location::ConstantLocation(constant));
2844}
2845
2846void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2847  // Will be generated at use site.
2848}
2849
2850void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2851  // The trampoline uses the same calling convention as the dex calling convention,
2852  // except that instead of loading arg0/r0 with the target Method*, arg0/r0 will
2853  // contain the method_idx.
2854  HandleInvoke(invoke);
2855}
2856
2857void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2858  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
2859}
2860
2861void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
2862  InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
2863  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2864}
2865
2866void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2867  HandleInvoke(invoke);
2868}
2869
2870void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2871  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2872  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
2873  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2874      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
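      // The embedded IMT has a fixed number of slots, so the imt index is taken modulo
      // kImtSize; colliding entries share a slot and are resolved at runtime by the imt
      // conflict trampoline, keyed on the method index passed in ip1 below.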
2875  Location receiver = invoke->GetLocations()->InAt(0);
2876  Offset class_offset = mirror::Object::ClassOffset();
2877  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2878
2879  // The register ip1 is required to be used for the hidden argument in
2880  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
2881  MacroAssembler* masm = GetVIXLAssembler();
2882  UseScratchRegisterScope scratch_scope(masm);
2883  BlockPoolsScope block_pools(masm);
2884  scratch_scope.Exclude(ip1);
2885  __ Mov(ip1, invoke->GetDexMethodIndex());
2886
2887  // temp = object->GetClass();
2888  if (receiver.IsStackSlot()) {
2889    __ Ldr(temp.W(), StackOperandFrom(receiver));
2890    __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
2891  } else {
2892    __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
2893  }
2894  codegen_->MaybeRecordImplicitNullCheck(invoke);
2895  GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
2896  // temp = temp->GetImtEntryAt(method_offset);
2897  __ Ldr(temp, MemOperand(temp, method_offset));
2898  // lr = temp->GetEntryPoint();
2899  __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
2900  // lr();
2901  __ Blr(lr);
2902  DCHECK(!codegen_->IsLeafMethod());
2903  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2904}
2905
2906void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2907  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2908  if (intrinsic.TryDispatch(invoke)) {
2909    return;
2910  }
2911
2912  HandleInvoke(invoke);
2913}
2914
2915void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2916  // When we do not run baseline, explicit clinit checks triggered by static
2917  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2918  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2919
2920  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2921  if (intrinsic.TryDispatch(invoke)) {
2922    return;
2923  }
2924
2925  HandleInvoke(invoke);
2926}
2927
2928static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
2929  if (invoke->GetLocations()->Intrinsified()) {
2930    IntrinsicCodeGeneratorARM64 intrinsic(codegen);
2931    intrinsic.Dispatch(invoke);
2932    return true;
2933  }
2934  return false;
2935}
2936
2937HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
2938      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
2939      MethodReference target_method ATTRIBUTE_UNUSED) {
2940  // On arm64 we support all dispatch types.
2941  return desired_dispatch_info;
2942}
2943
2944void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2945  // For better instruction scheduling we load the direct code pointer before the method pointer.
2946  bool direct_code_loaded = false;
2947  switch (invoke->GetCodePtrLocation()) {
2948    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2949      // LR = code address from literal pool with link-time patch.
2950      __ Ldr(lr, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
2951      direct_code_loaded = true;
2952      break;
2953    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
2954      // LR = invoke->GetDirectCodePtr();
2955      __ Ldr(lr, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
2956      direct_code_loaded = true;
2957      break;
2958    default:
2959      break;
2960  }
2961
2962  // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
2963  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
2964  switch (invoke->GetMethodLoadKind()) {
2965    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
2966      // temp = thread->string_init_entrypoint
2967      __ Ldr(XRegisterFrom(temp), MemOperand(tr, invoke->GetStringInitOffset()));
2968      break;
2969    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
2970      callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
2971      break;
2972    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
2973      // Load method address from literal pool.
2974      __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
2975      break;
2976    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
2977      // Load method address from literal pool with a link-time patch.
2978      __ Ldr(XRegisterFrom(temp),
2979             DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
2980      break;
2981    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
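          // The ADRP/LDR pair below is emitted with placeholder immediates that the
          // linker patches to address the dex cache array element, roughly:
          //   adrp xN, <array element, page> ; ldr xN, [xN, <page offset>]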
2982      // Add ADRP with its PC-relative DexCache access patch.
2983      pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
2984                                                  invoke->GetDexCacheArrayOffset());
2985      vixl::Label* pc_insn_label = &pc_relative_dex_cache_patches_.back().label;
2986      {
2987        vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
2988        __ Bind(pc_insn_label);
2989        __ adrp(XRegisterFrom(temp), 0);
2990      }
2991      pc_relative_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
2992      // Add LDR with its PC-relative DexCache access patch.
2993      pc_relative_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
2994                                                  invoke->GetDexCacheArrayOffset());
2995      {
2996        vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
2997        __ Bind(&pc_relative_dex_cache_patches_.back().label);
2998        __ ldr(XRegisterFrom(temp), MemOperand(XRegisterFrom(temp), 0));
2999        pc_relative_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
3000      }
3001      break;
3002    }
3003    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
3004      Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
3005      Register reg = XRegisterFrom(temp);
3006      Register method_reg;
3007      if (current_method.IsRegister()) {
3008        method_reg = XRegisterFrom(current_method);
3009      } else {
3010        DCHECK(invoke->GetLocations()->Intrinsified());
3011        DCHECK(!current_method.IsValid());
3012        method_reg = reg;
3013        __ Ldr(reg.X(), MemOperand(sp, kCurrentMethodStackOffset));
3014      }
3015
3016      // temp = current_method->dex_cache_resolved_methods_;
3017      __ Ldr(reg.X(),
3018             MemOperand(method_reg.X(),
3019                        ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
3020      // temp = temp[index_in_cache];
3021      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
3022      __ Ldr(reg.X(), MemOperand(reg.X(), GetCachePointerOffset(index_in_cache)));
3023      break;
3024    }
3025  }
3026
3027  switch (invoke->GetCodePtrLocation()) {
3028    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
3029      __ Bl(&frame_entry_label_);
3030      break;
3031    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
3032      relative_call_patches_.emplace_back(invoke->GetTargetMethod());
3033      vixl::Label* label = &relative_call_patches_.back().label;
3034      vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
3035      __ Bind(label);
3036      __ bl(0);  // Branch and link to itself. This will be overridden at link time.
3037      break;
3038    }
3039    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
3040    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
3041      // LR prepared above for better instruction scheduling.
3042      DCHECK(direct_code_loaded);
3043      // lr()
3044      __ Blr(lr);
3045      break;
3046    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
3047      // LR = callee_method->entry_point_from_quick_compiled_code_;
3048      __ Ldr(lr, MemOperand(
3049          XRegisterFrom(callee_method),
3050          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
3051      // lr()
3052      __ Blr(lr);
3053      break;
3054  }
3055
3056  DCHECK(!IsLeafMethod());
3057}
3058
3059void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
3060  LocationSummary* locations = invoke->GetLocations();
3061  Location receiver = locations->InAt(0);
3062  Register temp = XRegisterFrom(temp_in);
3063  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
3064      invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
3065  Offset class_offset = mirror::Object::ClassOffset();
3066  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
3067
3068  BlockPoolsScope block_pools(GetVIXLAssembler());
3069
3070  DCHECK(receiver.IsRegister());
3071  __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
3072  MaybeRecordImplicitNullCheck(invoke);
3073  GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
3074  // temp = temp->GetMethodAt(method_offset);
3075  __ Ldr(temp, MemOperand(temp, method_offset));
3076  // lr = temp->GetEntryPoint();
3077  __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
3078  // lr();
3079  __ Blr(lr);
3080}
3081
3082void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
3083  DCHECK(linker_patches->empty());
3084  size_t size =
3085      method_patches_.size() +
3086      call_patches_.size() +
3087      relative_call_patches_.size() +
3088      pc_relative_dex_cache_patches_.size();
3089  linker_patches->reserve(size);
3090  for (const auto& entry : method_patches_) {
3091    const MethodReference& target_method = entry.first;
3092    vixl::Literal<uint64_t>* literal = entry.second;
3093    linker_patches->push_back(LinkerPatch::MethodPatch(literal->offset(),
3094                                                       target_method.dex_file,
3095                                                       target_method.dex_method_index));
3096  }
3097  for (const auto& entry : call_patches_) {
3098    const MethodReference& target_method = entry.first;
3099    vixl::Literal<uint64_t>* literal = entry.second;
3100    linker_patches->push_back(LinkerPatch::CodePatch(literal->offset(),
3101                                                     target_method.dex_file,
3102                                                     target_method.dex_method_index));
3103  }
3104  for (const MethodPatchInfo<vixl::Label>& info : relative_call_patches_) {
3105    linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.location(),
3106                                                             info.target_method.dex_file,
3107                                                             info.target_method.dex_method_index));
3108  }
3109  for (const PcRelativeDexCacheAccessInfo& info : pc_relative_dex_cache_patches_) {
3110    linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location(),
3111                                                              &info.target_dex_file,
3112                                                              info.pc_insn_label->location(),
3113                                                              info.element_offset));
3114  }
3115}
3116
3117vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
3118  // Look up the literal for value.
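      // The lower_bound() result doubles as the insertion hint below, so the map is
      // searched only once whether or not the literal already exists.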
3119  auto lb = uint64_literals_.lower_bound(value);
3120  if (lb != uint64_literals_.end() && !uint64_literals_.key_comp()(value, lb->first)) {
3121    return lb->second;
3122  }
3123  // We don't have a literal for this value, insert a new one.
3124  vixl::Literal<uint64_t>* literal = __ CreateLiteralDestroyedWithPool<uint64_t>(value);
3125  uint64_literals_.PutBefore(lb, value, literal);
3126  return literal;
3127}
3128
3129vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
3130    MethodReference target_method,
3131    MethodToLiteralMap* map) {
3132  // Look up the literal for target_method.
3133  auto lb = map->lower_bound(target_method);
3134  if (lb != map->end() && !map->key_comp()(target_method, lb->first)) {
3135    return lb->second;
3136  }
3137  // We don't have a literal for this method yet, insert a new one.
3138  vixl::Literal<uint64_t>* literal = __ CreateLiteralDestroyedWithPool<uint64_t>(0u);
3139  map->PutBefore(lb, target_method, literal);
3140  return literal;
3141}
3142
3143vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
3144    MethodReference target_method) {
3145  return DeduplicateMethodLiteral(target_method, &method_patches_);
3146}
3147
3148vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
3149    MethodReference target_method) {
3150  return DeduplicateMethodLiteral(target_method, &call_patches_);
3151}
3152
3153
3154void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
3155  // When we do not run baseline, explicit clinit checks triggered by static
3156  // invokes must have been pruned by art::PrepareForRegisterAllocation.
3157  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
3158
3159  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
3160    return;
3161  }
3162
3163  BlockPoolsScope block_pools(GetVIXLAssembler());
3164  LocationSummary* locations = invoke->GetLocations();
3165  codegen_->GenerateStaticOrDirectCall(
3166      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
3167  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
3168}
3169
3170void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
3171  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
3172    return;
3173  }
3174
3175  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
3176  DCHECK(!codegen_->IsLeafMethod());
3177  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
3178}
3179
3180void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
3181  InvokeRuntimeCallingConvention calling_convention;
3182  CodeGenerator::CreateLoadClassLocationSummary(
3183      cls,
3184      LocationFrom(calling_convention.GetRegisterAt(0)),
3185      LocationFrom(vixl::x0));
3186}
3187
3188void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
3189  if (cls->NeedsAccessCheck()) {
3190    codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex());
3191    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
3192                            cls,
3193                            cls->GetDexPc(),
3194                            nullptr);
3195    return;
3196  }
3197
3198  Register out = OutputRegister(cls);
3199  Register current_method = InputRegisterAt(cls, 0);
3200  if (cls->IsReferrersClass()) {
3201    DCHECK(!cls->CanCallRuntime());
3202    DCHECK(!cls->MustGenerateClinitCheck());
3203    __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
3204  } else {
3205    DCHECK(cls->CanCallRuntime());
3206    MemberOffset resolved_types_offset = ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
3207    __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
3208    __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
3209    // TODO: We will need a read barrier here.
3210
3211    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
3212        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
3213    codegen_->AddSlowPath(slow_path);
3214    __ Cbz(out, slow_path->GetEntryLabel());
3215    if (cls->MustGenerateClinitCheck()) {
3216      GenerateClassInitializationCheck(slow_path, out);
3217    } else {
3218      __ Bind(slow_path->GetExitLabel());
3219    }
3220  }
3221}
3222
3223static MemOperand GetExceptionTlsAddress() {
3224  return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
3225}
3226
3227void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
3228  LocationSummary* locations =
3229      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
3230  locations->SetOut(Location::RequiresRegister());
3231}
3232
3233void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
3234  __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress());
3235}
3236
3237void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
3238  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
3239}
3240
3241void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
3242  __ Str(wzr, GetExceptionTlsAddress());
3243}
3244
3245void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
3246  load->SetLocations(nullptr);
3247}
3248
3249void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
3250  // Nothing to do, this is driven by the code generator.
3251}
3252
3253void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
3254  LocationSummary* locations =
3255      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
3256  locations->SetInAt(0, Location::RequiresRegister());
3257  locations->SetOut(Location::RequiresRegister());
3258}
3259
3260void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
3261  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
3262  codegen_->AddSlowPath(slow_path);
3263
3264  Register out = OutputRegister(load);
3265  Register current_method = InputRegisterAt(load, 0);
3266  __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
3267  __ Ldr(out.X(), HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
3268  __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())));
3269  // TODO: We will need a read barrier here.
3270  __ Cbz(out, slow_path->GetEntryLabel());
3271  __ Bind(slow_path->GetExitLabel());
3272}
3273
3274void LocationsBuilderARM64::VisitLocal(HLocal* local) {
3275  local->SetLocations(nullptr);
3276}
3277
3278void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
3279  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
3280}
3281
3282void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
3283  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
3284  locations->SetOut(Location::ConstantLocation(constant));
3285}
3286
3287void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
3288  // Will be generated at use site.
3289}
3290
3291void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
3292  LocationSummary* locations =
3293      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3294  InvokeRuntimeCallingConvention calling_convention;
3295  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
3296}
3297
3298void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
3299  codegen_->InvokeRuntime(instruction->IsEnter()
3300        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
3301      instruction,
3302      instruction->GetDexPc(),
3303      nullptr);
3304  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
3305}
3306
3307void LocationsBuilderARM64::VisitMul(HMul* mul) {
3308  LocationSummary* locations =
3309      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
3310  switch (mul->GetResultType()) {
3311    case Primitive::kPrimInt:
3312    case Primitive::kPrimLong:
3313      locations->SetInAt(0, Location::RequiresRegister());
3314      locations->SetInAt(1, Location::RequiresRegister());
3315      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3316      break;
3317
3318    case Primitive::kPrimFloat:
3319    case Primitive::kPrimDouble:
3320      locations->SetInAt(0, Location::RequiresFpuRegister());
3321      locations->SetInAt(1, Location::RequiresFpuRegister());
3322      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3323      break;
3324
3325    default:
3326      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
3327  }
3328}
3329
3330void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
3331  switch (mul->GetResultType()) {
3332    case Primitive::kPrimInt:
3333    case Primitive::kPrimLong:
3334      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
3335      break;
3336
3337    case Primitive::kPrimFloat:
3338    case Primitive::kPrimDouble:
3339      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
3340      break;
3341
3342    default:
3343      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
3344  }
3345}
3346
3347void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
3348  LocationSummary* locations =
3349      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
3350  switch (neg->GetResultType()) {
3351    case Primitive::kPrimInt:
3352    case Primitive::kPrimLong:
3353      locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
3354      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3355      break;
3356
3357    case Primitive::kPrimFloat:
3358    case Primitive::kPrimDouble:
3359      locations->SetInAt(0, Location::RequiresFpuRegister());
3360      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3361      break;
3362
3363    default:
3364      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
3365  }
3366}
3367
3368void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
3369  switch (neg->GetResultType()) {
3370    case Primitive::kPrimInt:
3371    case Primitive::kPrimLong:
3372      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
3373      break;
3374
3375    case Primitive::kPrimFloat:
3376    case Primitive::kPrimDouble:
3377      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
3378      break;
3379
3380    default:
3381      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
3382  }
3383}
3384
3385void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
3386  LocationSummary* locations =
3387      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3388  InvokeRuntimeCallingConvention calling_convention;
3389  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
3390  locations->SetOut(LocationFrom(x0));
3391  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
3392  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
3393  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
3394                       void*, uint32_t, int32_t, ArtMethod*>();
3395}
3396
3397void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
3398  LocationSummary* locations = instruction->GetLocations();
3399  InvokeRuntimeCallingConvention calling_convention;
3400  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
3401  DCHECK(type_index.Is(w0));
3402  __ Mov(type_index, instruction->GetTypeIndex());
3403  // Note: if heap poisoning is enabled, the entry point takes care
3404  // of poisoning the reference.
3405  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3406                          instruction,
3407                          instruction->GetDexPc(),
3408                          nullptr);
3409  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
3410}
3411
3412void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
3413  LocationSummary* locations =
3414      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3415  InvokeRuntimeCallingConvention calling_convention;
3416  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
3417  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
3418  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
3419  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
3420}
3421
3422void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
3423  LocationSummary* locations = instruction->GetLocations();
3424  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
3425  DCHECK(type_index.Is(w0));
3426  __ Mov(type_index, instruction->GetTypeIndex());
3427  // Note: if heap poisoning is enabled, the entry point takes care
3428  // of poisoning the reference.
3429  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3430                          instruction,
3431                          instruction->GetDexPc(),
3432                          nullptr);
3433  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
3434}
3435
3436void LocationsBuilderARM64::VisitNot(HNot* instruction) {
3437  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3438  locations->SetInAt(0, Location::RequiresRegister());
3439  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3440}
3441
3442void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
3443  switch (instruction->GetResultType()) {
3444    case Primitive::kPrimInt:
3445    case Primitive::kPrimLong:
3446      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
3447      break;
3448
3449    default:
3450      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
3451  }
3452}
3453
3454void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
3455  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3456  locations->SetInAt(0, Location::RequiresRegister());
3457  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3458}
3459
3460void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
3461  __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
3462}
3463
3464void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
3465  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
3466      ? LocationSummary::kCallOnSlowPath
3467      : LocationSummary::kNoCall;
3468  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3469  locations->SetInAt(0, Location::RequiresRegister());
3470  if (instruction->HasUses()) {
3471    locations->SetOut(Location::SameAsFirstInput());
3472  }
3473}
3474
3475void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
3476  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3477    return;
3478  }
3479
3480  BlockPoolsScope block_pools(GetVIXLAssembler());
3481  Location obj = instruction->GetLocations()->InAt(0);
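      // The load into wzr is only for its side effect: a null `obj` faults here, and
      // the fault handler turns the signal into a NullPointerException.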
3482  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
3483  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3484}
3485
3486void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
3487  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
3488  codegen_->AddSlowPath(slow_path);
3489
3490  LocationSummary* locations = instruction->GetLocations();
3491  Location obj = locations->InAt(0);
3492
3493  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
3494}
3495
3496void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
3497  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
3498    GenerateImplicitNullCheck(instruction);
3499  } else {
3500    GenerateExplicitNullCheck(instruction);
3501  }
3502}
3503
3504void LocationsBuilderARM64::VisitOr(HOr* instruction) {
3505  HandleBinaryOp(instruction);
3506}
3507
3508void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
3509  HandleBinaryOp(instruction);
3510}
3511
3512void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
3513  LOG(FATAL) << "Unreachable";
3514}
3515
3516void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
3517  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
3518}
3519
3520void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
3521  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3522  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
3523  if (location.IsStackSlot()) {
3524    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3525  } else if (location.IsDoubleStackSlot()) {
3526    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3527  }
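      // Incoming stack arguments live in the caller's frame, so their slot indices are
      // rebased past this method's own frame.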
3528  locations->SetOut(location);
3529}
3530
3531void InstructionCodeGeneratorARM64::VisitParameterValue(
3532    HParameterValue* instruction ATTRIBUTE_UNUSED) {
3533  // Nothing to do, the parameter is already at its location.
3534}
3535
3536void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
3537  LocationSummary* locations =
3538      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3539  locations->SetOut(LocationFrom(kArtMethodRegister));
3540}
3541
3542void InstructionCodeGeneratorARM64::VisitCurrentMethod(
3543    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
3544  // Nothing to do, the method is already at its location.
3545}
3546
3547void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
3548  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3549  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
3550    locations->SetInAt(i, Location::Any());
3551  }
3552  locations->SetOut(Location::Any());
3553}
3554
3555void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
3556  LOG(FATAL) << "Unreachable";
3557}
3558
3559void LocationsBuilderARM64::VisitRem(HRem* rem) {
3560  Primitive::Type type = rem->GetResultType();
3561  LocationSummary::CallKind call_kind =
3562      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
3563  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
3564
3565  switch (type) {
3566    case Primitive::kPrimInt:
3567    case Primitive::kPrimLong:
3568      locations->SetInAt(0, Location::RequiresRegister());
3569      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
3570      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3571      break;
3572
3573    case Primitive::kPrimFloat:
3574    case Primitive::kPrimDouble: {
3575      InvokeRuntimeCallingConvention calling_convention;
3576      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
3577      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
3578      locations->SetOut(calling_convention.GetReturnLocation(type));
3579
3580      break;
3581    }
3582
3583    default:
3584      LOG(FATAL) << "Unexpected rem type " << type;
3585  }
3586}
3587
3588void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
3589  Primitive::Type type = rem->GetResultType();
3590
3591  switch (type) {
3592    case Primitive::kPrimInt:
3593    case Primitive::kPrimLong: {
3594      GenerateDivRemIntegral(rem);
3595      break;
3596    }
3597
3598    case Primitive::kPrimFloat:
3599    case Primitive::kPrimDouble: {
3600      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
3601                                                             : QUICK_ENTRY_POINT(pFmod);
3602      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
3603      break;
3604    }
3605
3606    default:
3607      LOG(FATAL) << "Unexpected rem type " << type;
3608  }
3609}
3610
3611void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
3612  memory_barrier->SetLocations(nullptr);
3613}
3614
3615void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
3616  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
3617}
3618
3619void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
3620  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3621  Primitive::Type return_type = instruction->InputAt(0)->GetType();
3622  locations->SetInAt(0, ARM64ReturnLocation(return_type));
3623}
3624
3625void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
3626  codegen_->GenerateFrameExit();
3627}
3628
3629void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
3630  instruction->SetLocations(nullptr);
3631}
3632
3633void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) {
3634  codegen_->GenerateFrameExit();
3635}
3636
3637void LocationsBuilderARM64::VisitShl(HShl* shl) {
3638  HandleShift(shl);
3639}
3640
3641void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
3642  HandleShift(shl);
3643}
3644
3645void LocationsBuilderARM64::VisitShr(HShr* shr) {
3646  HandleShift(shr);
3647}
3648
3649void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
3650  HandleShift(shr);
3651}
3652
3653void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
3654  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
3655  Primitive::Type field_type = store->InputAt(1)->GetType();
3656  switch (field_type) {
3657    case Primitive::kPrimNot:
3658    case Primitive::kPrimBoolean:
3659    case Primitive::kPrimByte:
3660    case Primitive::kPrimChar:
3661    case Primitive::kPrimShort:
3662    case Primitive::kPrimInt:
3663    case Primitive::kPrimFloat:
3664      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
3665      break;
3666
3667    case Primitive::kPrimLong:
3668    case Primitive::kPrimDouble:
3669      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
3670      break;
3671
3672    default:
3673      LOG(FATAL) << "Unimplemented local type " << field_type;
3674  }
3675}
3676
3677void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
3678}
3679
3680void LocationsBuilderARM64::VisitSub(HSub* instruction) {
3681  HandleBinaryOp(instruction);
3682}
3683
3684void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
3685  HandleBinaryOp(instruction);
3686}
3687
3688void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3689  HandleFieldGet(instruction);
3690}
3691
3692void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3693  HandleFieldGet(instruction, instruction->GetFieldInfo());
3694}
3695
3696void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3697  HandleFieldSet(instruction);
3698}
3699
3700void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3701  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3702}
3703
3704void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
3705    HUnresolvedInstanceFieldGet* instruction) {
3706  FieldAccessCallingConventionARM64 calling_convention;
3707  codegen_->CreateUnresolvedFieldLocationSummary(
3708      instruction, instruction->GetFieldType(), calling_convention);
3709}
3710
3711void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
3712    HUnresolvedInstanceFieldGet* instruction) {
3713  FieldAccessCallingConventionARM64 calling_convention;
3714  codegen_->GenerateUnresolvedFieldAccess(instruction,
3715                                          instruction->GetFieldType(),
3716                                          instruction->GetFieldIndex(),
3717                                          instruction->GetDexPc(),
3718                                          calling_convention);
3719}
3720
3721void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
3722    HUnresolvedInstanceFieldSet* instruction) {
3723  FieldAccessCallingConventionARM64 calling_convention;
3724  codegen_->CreateUnresolvedFieldLocationSummary(
3725      instruction, instruction->GetFieldType(), calling_convention);
3726}
3727
3728void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
3729    HUnresolvedInstanceFieldSet* instruction) {
3730  FieldAccessCallingConventionARM64 calling_convention;
3731  codegen_->GenerateUnresolvedFieldAccess(instruction,
3732                                          instruction->GetFieldType(),
3733                                          instruction->GetFieldIndex(),
3734                                          instruction->GetDexPc(),
3735                                          calling_convention);
3736}
3737
3738void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
3739    HUnresolvedStaticFieldGet* instruction) {
3740  FieldAccessCallingConventionARM64 calling_convention;
3741  codegen_->CreateUnresolvedFieldLocationSummary(
3742      instruction, instruction->GetFieldType(), calling_convention);
3743}
3744
3745void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
3746    HUnresolvedStaticFieldGet* instruction) {
3747  FieldAccessCallingConventionARM64 calling_convention;
3748  codegen_->GenerateUnresolvedFieldAccess(instruction,
3749                                          instruction->GetFieldType(),
3750                                          instruction->GetFieldIndex(),
3751                                          instruction->GetDexPc(),
3752                                          calling_convention);
3753}
3754
3755void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
3756    HUnresolvedStaticFieldSet* instruction) {
3757  FieldAccessCallingConventionARM64 calling_convention;
3758  codegen_->CreateUnresolvedFieldLocationSummary(
3759      instruction, instruction->GetFieldType(), calling_convention);
3760}
3761
3762void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
3763    HUnresolvedStaticFieldSet* instruction) {
3764  FieldAccessCallingConventionARM64 calling_convention;
3765  codegen_->GenerateUnresolvedFieldAccess(instruction,
3766                                          instruction->GetFieldType(),
3767                                          instruction->GetFieldIndex(),
3768                                          instruction->GetDexPc(),
3769                                          calling_convention);
3770}
3771
3772void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
3773  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
3774}
3775
3776void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
3777  HBasicBlock* block = instruction->GetBlock();
3778  if (block->GetLoopInformation() != nullptr) {
3779    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3780    // The back edge will generate the suspend check.
3781    return;
3782  }
3783  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3784    // The goto will generate the suspend check.
3785    return;
3786  }
3787  GenerateSuspendCheck(instruction, nullptr);
3788}
3789
3790void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
3791  temp->SetLocations(nullptr);
3792}
3793
3794void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
3795  // Nothing to do, this is driven by the code generator.
3796}
3797
3798void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
3799  LocationSummary* locations =
3800      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3801  InvokeRuntimeCallingConvention calling_convention;
3802  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
3803}
3804
3805void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
3806  codegen_->InvokeRuntime(
3807      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
3808  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
3809}
3810
3811void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
3812  LocationSummary* locations =
3813      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
3814  Primitive::Type input_type = conversion->GetInputType();
3815  Primitive::Type result_type = conversion->GetResultType();
3816  DCHECK_NE(input_type, result_type);
3817  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
3818      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
3819    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
3820  }
3821
3822  if (Primitive::IsFloatingPointType(input_type)) {
3823    locations->SetInAt(0, Location::RequiresFpuRegister());
3824  } else {
3825    locations->SetInAt(0, Location::RequiresRegister());
3826  }
3827
3828  if (Primitive::IsFloatingPointType(result_type)) {
3829    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3830  } else {
3831    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3832  }
3833}
3834
3835void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
3836  Primitive::Type result_type = conversion->GetResultType();
3837  Primitive::Type input_type = conversion->GetInputType();
3838
3839  DCHECK_NE(input_type, result_type);
3840
3841  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
3842    int result_size = Primitive::ComponentSize(result_type);
3843    int input_size = Primitive::ComponentSize(input_type);
3844    int min_size = std::min(result_size, input_size);
3845    Register output = OutputRegister(conversion);
3846    Register source = InputRegisterAt(conversion, 0);
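        // Illustrative examples: long->char keeps the low 16 bits via Ubfx, byte->int
        // sign-extends the low 8 bits via Sbfx, and long->int is a plain W-register move.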
3847    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
3848      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
3849    } else if (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimLong) {
3850      // 'int' values are used directly as W registers, discarding the top
3851      // bits, so we don't need to sign-extend and can just perform a move.
3852      // We do not pass the `kDiscardForSameWReg` argument to force clearing the
3853      // top 32 bits of the target register. We theoretically could leave those
3854      // bits unchanged, but we would have to make sure that no code uses a
3855      // 32-bit input value as a 64-bit value assuming that the top 32 bits are
3856      // zero.
3857      __ Mov(output.W(), source.W());
3858    } else if ((result_type == Primitive::kPrimChar) ||
3859               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
3860      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
3861    } else {
3862      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
3863    }
3864  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
3865    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
3866  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
3867    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
3868    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
3869  } else if (Primitive::IsFloatingPointType(result_type) &&
3870             Primitive::IsFloatingPointType(input_type)) {
3871    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
3872  } else {
3873    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
3874                << " to " << result_type;
3875  }
3876}
3877
3878void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
3879  HandleShift(ushr);
3880}
3881
3882void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
3883  HandleShift(ushr);
3884}
3885
3886void LocationsBuilderARM64::VisitXor(HXor* instruction) {
3887  HandleBinaryOp(instruction);
3888}
3889
3890void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
3891  HandleBinaryOp(instruction);
3892}
3893
3894void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
3895  // Nothing to do, this should be removed during prepare for register allocator.
3896  LOG(FATAL) << "Unreachable";
3897}
3898
3899void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
3900  // Nothing to do, this should be removed during prepare for register allocator.
3901  LOG(FATAL) << "Unreachable";
3902}
3903
3904void LocationsBuilderARM64::VisitFakeString(HFakeString* instruction) {
3905  DCHECK(codegen_->IsBaseline());
3906  LocationSummary* locations =
3907      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3908  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
3909}
3910
3911void InstructionCodeGeneratorARM64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
3912  DCHECK(codegen_->IsBaseline());
3913  // Will be generated at use site.
3914}
3915
3916  // Packed switch: generate cascaded compare/jumps, or a jump table for large switches.
3917void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
3918  LocationSummary* locations =
3919      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
3920  locations->SetInAt(0, Location::RequiresRegister());
3921}
3922
3923void InstructionCodeGeneratorARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
3924  int32_t lower_bound = switch_instr->GetStartValue();
3925  uint32_t num_entries = switch_instr->GetNumEntries();
3926  Register value_reg = InputRegisterAt(switch_instr, 0);
3927  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
3928
3929  // Rough estimate: assume at most 16 assembly instructions are generated per HIR on average.
3930  static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * vixl::kInstructionSize;
3931  // ADR has a limited range (+/- 1 MB), so we set a threshold on the number of HIRs in the
3932  // graph to make sure we don't emit a jump table whose target may be out of range.
3933  // TODO: Instead of emitting all jump tables at the end of the code, we could keep track of ADR
3934  // ranges and emit the tables only as required.
3935  static constexpr int32_t kJumpTableInstructionThreshold = 1 * MB / kMaxExpectedSizePerHInstruction;
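      // With vixl::kInstructionSize == 4, this works out to 1 MB / 64 B == 16384 HIRs.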
3936
3937  if (num_entries < kPackedSwitchJumpTableThreshold ||
3938      // Current instruction id is an upper bound of the number of HIRs in the graph.
3939      GetGraph()->GetCurrentInstructionId() > kJumpTableInstructionThreshold) {
3940    // Create a series of compare/jumps.
3941    const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
3942    for (uint32_t i = 0; i < num_entries; i++) {
3943      int32_t case_value = lower_bound + i;
3944      vixl::Label* succ = codegen_->GetLabelOf(successors[i]);
3945      if (case_value == 0) {
3946        __ Cbz(value_reg, succ);
3947      } else {
3948        __ Cmp(value_reg, Operand(case_value));
3949        __ B(eq, succ);
3950      }
3951    }
3952
3953    // And the default for any other value.
3954    if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
3955      __ B(codegen_->GetLabelOf(default_block));
3956    }
3957  } else {
3958    JumpTableARM64* jump_table = new (GetGraph()->GetArena()) JumpTableARM64(switch_instr);
3959    codegen_->AddJumpTable(jump_table);
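        // The sequence emitted below is roughly (register names illustrative):
        //   sub  w_idx, w_value, #lower_bound   // only when lower_bound != 0
        //   cmp  w_idx, #num_entries
        //   b.hs default_block
        //   adr  x_base, jump_table
        //   ldr  w_off, [x_base, w_idx, uxtw #2]
        //   add  x_tgt, x_base, w_off, sxtw
        //   br   x_tgt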
3960
3961    UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
3962
3963    // The instructions below need at most one scratch register. VIXL blocks two
3964    // registers for scratch use, so we are free to acquire one of them here.
3965    Register temp_w = temps.AcquireW();
3966    Register index;
3967    // Remove the bias.
3968    if (lower_bound != 0) {
3969      index = temp_w;
3970      __ Sub(index, value_reg, Operand(lower_bound));
3971    } else {
3972      index = value_reg;
3973    }
3974
3975    // Jump to the default block if the index is out of range.
3976    __ Cmp(index, Operand(num_entries));
3977    __ B(hs, codegen_->GetLabelOf(default_block));
3978
3979    // In the current VIXL implementation, encoding the immediate for Adr requires no
3980    // scratch registers, so we are free to use both VIXL scratch registers to reduce
3981    // register pressure.
3982    Register table_base = temps.AcquireX();
3983    // Load jump offset from the table.
3984    __ Adr(table_base, jump_table->GetTableStartLabel());
3985    Register jump_offset = temp_w;
3986    __ Ldr(jump_offset, MemOperand(table_base, index, UXTW, 2));
3987
3988    // Jump to the target block by branching to table_base (PC-relative) + offset.
3989    Register target_address = table_base;
3990    __ Add(target_address, table_base, Operand(jump_offset, SXTW));
3991    __ Br(target_address);
3992  }
3993}
3994
3995#undef __
3996#undef QUICK_ENTRY_POINT
3997
3998}  // namespace arm64
3999}  // namespace art
4000