code_generator_arm64.cc revision 85c7bab43d11180d552179c506c2ffdf34dd749c
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "common_arm64.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"


using namespace vixl;   // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
using helpers::ArtVixlRegCodeCoherentForRegSet;

static constexpr int kCurrentMethodStackOffset = 0;

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else if (return_type == Primitive::kPrimVoid) {
    return Location::NoLocation();
  } else {
    return LocationFrom(w0);
  }
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
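// QUICK_ENTRY_POINT(x) yields the byte offset of the quick entrypoint `x`
// within the Thread object for the arm64 word size; InvokeRuntime uses it for
// a tr-relative load of the entrypoint address.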
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

// Compute the memory operands needed to save/restore the live registers, then
// perform the save or restore.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
                                           RegisterSet* register_set,
                                           int64_t spill_offset,
                                           bool is_save) {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
                                         codegen->GetNumberOfCoreRegisters(),
                                         register_set->GetFloatingPointRegisters(),
                                         codegen->GetNumberOfFloatingPointRegisters()));

  CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
      register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
  CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
      register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));

  MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
  UseScratchRegisterScope temps(masm);

  Register base = masm->StackPointer();
  int64_t core_spill_size = core_list.TotalSizeInBytes();
  int64_t fp_spill_size = fp_list.TotalSizeInBytes();
  int64_t reg_size = kXRegSizeInBytes;
  int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
  uint32_t ls_access_size = WhichPowerOf2(reg_size);
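  // Note: A64 STP/LDP encode only a signed 7-bit immediate scaled by the
  // access size, so check whether the largest pair offset we may emit
  // (`max_ls_pair_offset`) is encodable.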
  if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
      !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
    // If the offset does not fit in the instruction's immediate field, use an
    // alternate register to compute the base address (the base of the floating
    // point registers' spill area).
    Register new_base = temps.AcquireSameSizeAs(base);
    __ Add(new_base, base, Operand(spill_offset + core_spill_size));
    base = new_base;
    spill_offset = -core_spill_size;
    int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
    DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
    DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
  }

  if (is_save) {
    __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
    __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  } else {
    __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
    __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  }
}

void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
      // If the register holds an object, update the stack mask.
      if (locations->RegisterContainsObject(i)) {
        locations->SetStackBit(stack_offset / kVRegSize);
      }
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_core_stack_offsets_[i] = stack_offset;
      stack_offset += kXRegSizeInBytes;
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
        register_set->ContainsFloatingPointRegister(i)) {
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_fpu_stack_offsets_[i] = stack_offset;
      stack_offset += kDRegSizeInBytes;
    }
  }

  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
}

void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
}

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
        locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
      : instruction_(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location class_to_check = locations->InAt(1);
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    uint32_t dex_pc = instruction_->GetDexPc();

    __ Bind(GetEntryLabel());

    if (instruction_->IsCheckCast()) {
      // The codegen for the instruction overwrites `temp`, so put it back in place.
      Register obj = InputRegisterAt(instruction_, 0);
      Register temp = WRegisterFrom(locations->GetTemp(0));
      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
      __ Ldr(temp, HeapOperand(obj, class_offset));
      arm64_codegen->GetAssembler()->MaybeUnpoisonHeapReference(temp);
    }

    if (!is_fatal_) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
        object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ B(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
  bool IsFatal() const { return is_fatal_; }

 private:
  HInstruction* const instruction_;
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
  return next_location;
}

Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
  return LocationFrom(kArtMethodRegister);
}

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                                       const Arm64InstructionSetFeatures& isa_features,
                                       const CompilerOptions& compiler_options,
                                       OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    callee_saved_core_registers.list(),
                    callee_saved_fp_registers.list(),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features),
      uint64_literals_(std::less<uint64_t>(), graph->GetArena()->Adapter()),
      method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
      call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
      relative_call_patches_(graph->GetArena()->Adapter()),
      pc_rel_dex_cache_patches_(graph->GetArena()->Adapter()) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(LocationFrom(lr));
}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();

  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
  // Note: There are 6 kinds of moves:
  // 1. constant -> GPR/FPR (non-cycle)
  // 2. constant -> stack (non-cycle)
  // 3. GPR/FPR -> GPR/FPR
  // 4. GPR/FPR -> stack
  // 5. stack -> GPR/FPR
  // 6. stack -> stack (non-cycle)
  // Cases 1, 2 and 6 should never be included in a dependency cycle on ARM64.
  // For cases 3, 4 and 5, VIXL uses at most one GPR per move. VIXL has two GPR
  // and one FPR temps, and there should be no intersecting cycles on ARM64, so
  // we always have at least one GPR and one FPR temp available to resolve the
  // dependency.
  vixl_temps_.Open(GetVIXLAssembler());
}

void ParallelMoveResolverARM64::FinishEmitNativeCode() {
  vixl_temps_.Close();
}

Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
  DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
         kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
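  // Scratches for stack-slot locations are materialized through a core
  // register; only an FPU-register request needs an FP scratch.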
  kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
  Location scratch = GetScratchLocation(kind);
  if (!scratch.Equals(Location::NoLocation())) {
    return scratch;
  }
  // Allocate from VIXL temp registers.
  if (kind == Location::kRegister) {
    scratch = LocationFrom(vixl_temps_.AcquireX());
  } else {
    DCHECK(kind == Location::kFpuRegister);
    scratch = LocationFrom(vixl_temps_.AcquireD());
  }
  AddScratchLocation(scratch);
  return scratch;
}

void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
  if (loc.IsRegister()) {
    vixl_temps_.Release(XRegisterFrom(loc));
  } else {
    DCHECK(loc.IsFpuRegister());
    vixl_temps_.Release(DRegisterFrom(loc));
  }
  RemoveScratchLocation(loc);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
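    // Implicit stack-overflow probe: load from the lowest address the frame
    // may touch. If that address falls in the stack's protected region, the
    // resulting fault is turned into a StackOverflowError by the runtime's
    // fault handler.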
    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    // Stack layout:
    //      sp[frame_size - 8]        : lr.
    //      ...                       : other preserved core registers.
    //      ...                       : other preserved fp registers.
    //      ...                       : reserved frame space.
    //      sp[0]                     : current method.
    __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
    GetAssembler()->cfi().AdjustCFAOffset(frame_size);
    GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
  }
}

void CodeGeneratorARM64::GenerateFrameExit() {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  GetAssembler()->cfi().RememberState();
  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
    GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    __ Drop(frame_size);
    GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
  }
  __ Ret();
  GetAssembler()->cfi().RestoreState();
  GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
  return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
                          core_spill_mask_);
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
                                         GetNumberOfFloatingPointRegisters()));
  return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
                          fpu_spill_mask_);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsFakeString()) {
    // The fake string is an alias for null.
    DCHECK(IsBaseline());
    instruction = locations->Out().GetConstant();
    DCHECK(instruction->IsNullConstant()) << instruction->DebugName();
  }

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    int64_t value = GetInt64ValueOf(instruction->AsConstant());
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
          ? temps.AcquireW()
          : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }

  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();   // Index within the CardTable - 32bit.
  vixl::Label done;
  if (value_can_be_null) {
    __ Cbz(value, &done);
  }
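  // Mark the card for `object`: `card` holds the card table's biased base and
  // `temp` the object address shifted right by kCardShift (the card index);
  // storing the base's low byte at base + index dirties the card (the base is
  // biased so that its low byte equals the dirty-card value).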
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  if (value_can_be_null) {
    __ Bind(&done);
  }
}

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
  // Blocked core registers:
  //      lr        : Runtime reserved.
  //      tr        : Runtime reserved.
  //      xSuspend  : Runtime reserved. TODO: Unblock this when the runtime stops using it.
  //      ip1       : VIXL core temp.
  //      ip0       : VIXL core temp.
  //
  // Blocked fp registers:
  //      d31       : VIXL fp temp.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }

  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }

  if (is_baseline) {
    CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
    while (!reserved_core_baseline_registers.IsEmpty()) {
      blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
    }

    CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
    while (!reserved_fp_baseline_registers.IsEmpty()) {
      blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
    }
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << XRegister(reg);
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << DRegister(reg);
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant()) {
    __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
  } else if (constant->IsLongConstant()) {
    __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
  } else if (constant->IsNullConstant()) {
    __ Mov(Register(destination), 0);
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}


static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         // Null is mapped to a core W register, which we associate with kPrimInt.
         (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we chose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        DCHECK(destination.IsFpuRegister());
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  UseScratchRegisterScope temps(masm);
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));

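      // Ldar only accepts core registers, so acquire-load into a core scratch
      // register of the right width and move the bits to the FP register.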
      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

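      // Stlr likewise only accepts core registers: move the FP value to a core
      // scratch register and release-store from there.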
      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kArm64WordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  BlockPoolsScope block_pools(GetVIXLAssembler());
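  // `tr` holds the current Thread*; the entrypoint address is loaded from the
  // thread-local entrypoint table and called indirectly via lr.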
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
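  // Class status values increase as initialization progresses, so a single
  // comparison against kStatusInitialized suffices: any lower status takes the
  // slow path.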
  if (use_acquire_release) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

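  // Map the portable barrier kinds onto DMB variants: kAnyAny and kAnyStore
  // conservatively use a full barrier, kLoadAny a load barrier, and
  // kStoreStore a store barrier, all with inner-shareable scope.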
  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

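  // Load the 16-bit thread flags; a non-zero value means a suspend or
  // checkpoint request is pending and the slow path must be taken.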
  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
                                                             CodeGeneratorARM64* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
  /* No unimplemented IR. */

#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode

enum UnimplementedInstructionBreakCode {
  // Using a base helps identify when we hit such breakpoints.
  UnimplementedInstructionBreakCodeBaseCode = 0x900,
#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
#undef ENUM_UNIMPLEMENTED_INSTRUCTION
};

#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
    UNUSED(instr);                                                                    \
    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
  }                                                                                   \
  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
    locations->SetOut(Location::Any());                                               \
  }
  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS

#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION

void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
  DCHECK_EQ(instr->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
  }
}

void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
                                                   const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  Primitive::Type field_type = field_info.GetFieldType();
  BlockPoolsScope block_pools(GetVIXLAssembler());

  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  if (field_info.IsVolatile()) {
    if (use_acquire_release) {
      // NB: LoadAcquire will record the pc info if needed.
      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
    } else {
      codegen_->Load(field_type, OutputCPURegister(instruction), field);
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      // For IRIW sequential consistency kLoadAny is not sufficient.
      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
    }
  } else {
    codegen_->Load(field_type, OutputCPURegister(instruction), field);
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (field_type == Primitive::kPrimNot) {
    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
  }
}

void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
                                                   const FieldInfo& field_info,
                                                   bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
  BlockPoolsScope block_pools(GetVIXLAssembler());

  Register obj = InputRegisterAt(instruction, 0);
  CPURegister value = InputCPURegisterAt(instruction, 1);
  CPURegister source = value;
  Offset offset = field_info.GetFieldOffset();
  Primitive::Type field_type = field_info.GetFieldType();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  {
    // We use a block to end the scratch scope before the write barrier, thus
    // freeing the temporary registers so they can be used in `MarkGCCard`.
    UseScratchRegisterScope temps(GetVIXLAssembler());

1356    if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) {
1357      DCHECK(value.IsW());
1358      Register temp = temps.AcquireW();
1359      __ Mov(temp, value.W());
1360      GetAssembler()->PoisonHeapReference(temp.W());
1361      source = temp;
1362    }
1363
1364    if (field_info.IsVolatile()) {
1365      if (use_acquire_release) {
1366        codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset));
1367        codegen_->MaybeRecordImplicitNullCheck(instruction);
1368      } else {
1369        GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
1370        codegen_->Store(field_type, source, HeapOperand(obj, offset));
1371        codegen_->MaybeRecordImplicitNullCheck(instruction);
1372        GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1373      }
1374    } else {
1375      codegen_->Store(field_type, source, HeapOperand(obj, offset));
1376      codegen_->MaybeRecordImplicitNullCheck(instruction);
1377    }
1378  }
1379
1380  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
1381    codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
1382  }
1383}
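
// Illustration (a sketch; registers and the #12 offset are examples): a
// volatile int field set compiles to roughly
//   with acquire/release:   add  ip0, x0, #12     // STLR takes no offset
//                           stlr w1, [ip0]        // store-release
//   otherwise:              dmb  ish              // kAnyStore barrier
//                           str  w1, [x0, #12]
//                           dmb  ish              // kAnyAny barrier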
1384
1385void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
1386  Primitive::Type type = instr->GetType();
1387
1388  switch (type) {
1389    case Primitive::kPrimInt:
1390    case Primitive::kPrimLong: {
1391      Register dst = OutputRegister(instr);
1392      Register lhs = InputRegisterAt(instr, 0);
1393      Operand rhs = InputOperandAt(instr, 1);
1394      if (instr->IsAdd()) {
1395        __ Add(dst, lhs, rhs);
1396      } else if (instr->IsAnd()) {
1397        __ And(dst, lhs, rhs);
1398      } else if (instr->IsOr()) {
1399        __ Orr(dst, lhs, rhs);
1400      } else if (instr->IsSub()) {
1401        __ Sub(dst, lhs, rhs);
1402      } else {
1403        DCHECK(instr->IsXor());
1404        __ Eor(dst, lhs, rhs);
1405      }
1406      break;
1407    }
1408    case Primitive::kPrimFloat:
1409    case Primitive::kPrimDouble: {
1410      FPRegister dst = OutputFPRegister(instr);
1411      FPRegister lhs = InputFPRegisterAt(instr, 0);
1412      FPRegister rhs = InputFPRegisterAt(instr, 1);
1413      if (instr->IsAdd()) {
1414        __ Fadd(dst, lhs, rhs);
1415      } else if (instr->IsSub()) {
1416        __ Fsub(dst, lhs, rhs);
1417      } else {
1418        LOG(FATAL) << "Unexpected floating-point binary operation";
1419      }
1420      break;
1421    }
1422    default:
1423      LOG(FATAL) << "Unexpected binary operation type " << type;
1424  }
1425}
1426
1427void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
1428  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1429
1430  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1431  Primitive::Type type = instr->GetResultType();
1432  switch (type) {
1433    case Primitive::kPrimInt:
1434    case Primitive::kPrimLong: {
1435      locations->SetInAt(0, Location::RequiresRegister());
1436      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1437      locations->SetOut(Location::RequiresRegister());
1438      break;
1439    }
1440    default:
1441      LOG(FATAL) << "Unexpected shift type " << type;
1442  }
1443}
1444
1445void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
1446  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1447
1448  Primitive::Type type = instr->GetType();
1449  switch (type) {
1450    case Primitive::kPrimInt:
1451    case Primitive::kPrimLong: {
1452      Register dst = OutputRegister(instr);
1453      Register lhs = InputRegisterAt(instr, 0);
1454      Operand rhs = InputOperandAt(instr, 1);
1455      if (rhs.IsImmediate()) {
1456        uint32_t shift_value = (type == Primitive::kPrimInt)
1457          ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
1458          : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
1459        if (instr->IsShl()) {
1460          __ Lsl(dst, lhs, shift_value);
1461        } else if (instr->IsShr()) {
1462          __ Asr(dst, lhs, shift_value);
1463        } else {
1464          __ Lsr(dst, lhs, shift_value);
1465        }
1466      } else {
1467        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1468
1469        if (instr->IsShl()) {
1470          __ Lsl(dst, lhs, rhs_reg);
1471        } else if (instr->IsShr()) {
1472          __ Asr(dst, lhs, rhs_reg);
1473        } else {
1474          __ Lsr(dst, lhs, rhs_reg);
1475        }
1476      }
1477      break;
1478    }
1479    default:
1480      LOG(FATAL) << "Unexpected shift operation type " << type;
1481  }
1482}
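
// The masking above implements Java shift semantics: only the low 5 bits
// (int) or 6 bits (long) of the shift amount are used, which is what
// kMaxIntShiftValue (0x1f) and kMaxLongShiftValue (0x3f) encode. For example
// (a sketch, registers are illustrative), an int shift `x << 33` emits
//   lsl w0, w1, #1        // 33 & 0x1f == 1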
1483
1484void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1485  HandleBinaryOp(instruction);
1486}
1487
1488void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1489  HandleBinaryOp(instruction);
1490}
1491
1492void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1493  HandleBinaryOp(instruction);
1494}
1495
1496void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1497  HandleBinaryOp(instruction);
1498}
1499
1500void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
1501  LocationSummary* locations =
1502      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1503  locations->SetInAt(0, Location::RequiresRegister());
1504  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1505  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1506    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1507  } else {
1508    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1509  }
1510}
1511
1512void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
1513  LocationSummary* locations = instruction->GetLocations();
1514  Primitive::Type type = instruction->GetType();
1515  Register obj = InputRegisterAt(instruction, 0);
1516  Location index = locations->InAt(1);
1517  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
1518  MemOperand source = HeapOperand(obj);
1519  MacroAssembler* masm = GetVIXLAssembler();
1520  UseScratchRegisterScope temps(masm);
1521  BlockPoolsScope block_pools(masm);
1522
1523  if (index.IsConstant()) {
1524    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
1525    source = HeapOperand(obj, offset);
1526  } else {
1527    Register temp = temps.AcquireSameSizeAs(obj);
1528    __ Add(temp, obj, offset);
1529    source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
1530  }
1531
1532  codegen_->Load(type, OutputCPURegister(instruction), source);
1533  codegen_->MaybeRecordImplicitNullCheck(instruction);
1534
1535  if (type == Primitive::kPrimNot) {
1536    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
1537  }
1538}
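
// Addressing sketch for the two paths above (registers are examples): a
// constant index is folded into the immediate offset,
//   ldr w0, [x1, #(data_offset + (index << shift))]
// while a variable index pays one extra ADD so the scaled-register form of
// the load can be used:
//   add ip0, x1, #data_offset
//   ldr w0, [ip0, x2, lsl #shift]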
1539
1540void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
1541  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1542  locations->SetInAt(0, Location::RequiresRegister());
1543  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1544}
1545
1546void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
1547  BlockPoolsScope block_pools(GetVIXLAssembler());
1548  __ Ldr(OutputRegister(instruction),
1549         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
1550  codegen_->MaybeRecordImplicitNullCheck(instruction);
1551}
1552
1553void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
1554  if (instruction->NeedsTypeCheck()) {
1555    LocationSummary* locations =
1556        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
1557    InvokeRuntimeCallingConvention calling_convention;
1558    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
1559    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
1560    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
1561  } else {
1562    LocationSummary* locations =
1563        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1564    locations->SetInAt(0, Location::RequiresRegister());
1565    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1566    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1567      locations->SetInAt(2, Location::RequiresFpuRegister());
1568    } else {
1569      locations->SetInAt(2, Location::RequiresRegister());
1570    }
1571  }
1572}
1573
1574void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
1575  Primitive::Type value_type = instruction->GetComponentType();
1576  LocationSummary* locations = instruction->GetLocations();
1577  bool needs_runtime_call = locations->WillCall();
1578
1579  if (needs_runtime_call) {
1580    // Note: if heap poisoning is enabled, pAputObject takes care
1581    // of poisoning the reference.
1582    codegen_->InvokeRuntime(
1583        QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
1584    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
1585  } else {
1586    Register obj = InputRegisterAt(instruction, 0);
1587    CPURegister value = InputCPURegisterAt(instruction, 2);
1588    CPURegister source = value;
1589    Location index = locations->InAt(1);
1590    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
1591    MemOperand destination = HeapOperand(obj);
1592    MacroAssembler* masm = GetVIXLAssembler();
1593    BlockPoolsScope block_pools(masm);
1594    {
1595      // We use a block to end the scratch scope before the write barrier, thus
1596      // freeing the temporary registers so they can be used in `MarkGCCard`.
1597      UseScratchRegisterScope temps(masm);
1598
1599      if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
1600        DCHECK(value.IsW());
1601        Register temp = temps.AcquireW();
1602        __ Mov(temp, value.W());
1603        GetAssembler()->PoisonHeapReference(temp.W());
1604        source = temp;
1605      }
1606
1607      if (index.IsConstant()) {
1608        offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1609        destination = HeapOperand(obj, offset);
1610      } else {
1611        Register temp = temps.AcquireSameSizeAs(obj);
1612        __ Add(temp, obj, offset);
1613        destination = HeapOperand(temp,
1614                                  XRegisterFrom(index),
1615                                  LSL,
1616                                  Primitive::ComponentSizeShift(value_type));
1617      }
1618
1619      codegen_->Store(value_type, source, destination);
1620      codegen_->MaybeRecordImplicitNullCheck(instruction);
1621    }
1622    if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
1623      codegen_->MarkGCCard(obj, value.W(), instruction->GetValueCanBeNull());
1624    }
1625  }
1626}
1627
1628void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1629  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
1630      ? LocationSummary::kCallOnSlowPath
1631      : LocationSummary::kNoCall;
1632  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
1633  locations->SetInAt(0, Location::RequiresRegister());
1634  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1635  if (instruction->HasUses()) {
1636    locations->SetOut(Location::SameAsFirstInput());
1637  }
1638}
1639
1640void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1641  BoundsCheckSlowPathARM64* slow_path =
1642      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
1643  codegen_->AddSlowPath(slow_path);
1644
1645  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
1646  __ B(slow_path->GetEntryLabel(), hs);
1647}
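
// A single unsigned comparison covers both failure modes: a negative index,
// viewed as unsigned, is larger than any valid array length, so the `hs`
// (unsigned >=) branch reaches the slow path for index < 0 as well as for
// index >= length.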
1648
1649void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
1650  LocationSummary* locations =
1651      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1652  locations->SetInAt(0, Location::RequiresRegister());
1653  if (check->HasUses()) {
1654    locations->SetOut(Location::SameAsFirstInput());
1655  }
1656}
1657
1658void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
1659  // We assume the class is not null.
1660  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
1661      check->GetLoadClass(), check, check->GetDexPc(), true);
1662  codegen_->AddSlowPath(slow_path);
1663  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
1664}
1665
1666static bool IsFloatingPointZeroConstant(HInstruction* instruction) {
1667  return (instruction->IsFloatConstant() && (instruction->AsFloatConstant()->GetValue() == 0.0f))
1668      || (instruction->IsDoubleConstant() && (instruction->AsDoubleConstant()->GetValue() == 0.0));
1669}
1670
1671void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
1672  LocationSummary* locations =
1673      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
1674  Primitive::Type in_type = compare->InputAt(0)->GetType();
1675  switch (in_type) {
1676    case Primitive::kPrimLong: {
1677      locations->SetInAt(0, Location::RequiresRegister());
1678      locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
1679      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1680      break;
1681    }
1682    case Primitive::kPrimFloat:
1683    case Primitive::kPrimDouble: {
1684      locations->SetInAt(0, Location::RequiresFpuRegister());
1685      locations->SetInAt(1,
1686                         IsFloatingPointZeroConstant(compare->InputAt(1))
1687                             ? Location::ConstantLocation(compare->InputAt(1)->AsConstant())
1688                             : Location::RequiresFpuRegister());
1689      locations->SetOut(Location::RequiresRegister());
1690      break;
1691    }
1692    default:
1693      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1694  }
1695}
1696
1697void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
1698  Primitive::Type in_type = compare->InputAt(0)->GetType();
1699
1700  //  0 if: left == right
1701  //  1 if: left  > right
1702  // -1 if: left  < right
1703  switch (in_type) {
1704    case Primitive::kPrimLong: {
1705      Register result = OutputRegister(compare);
1706      Register left = InputRegisterAt(compare, 0);
1707      Operand right = InputOperandAt(compare, 1);
1708
1709      __ Cmp(left, right);
1710      __ Cset(result, ne);
1711      __ Cneg(result, result, lt);
1712      break;
1713    }
1714    case Primitive::kPrimFloat:
1715    case Primitive::kPrimDouble: {
1716      Register result = OutputRegister(compare);
1717      FPRegister left = InputFPRegisterAt(compare, 0);
1718      if (compare->GetLocations()->InAt(1).IsConstant()) {
1719        DCHECK(IsFloatingPointZeroConstant(compare->GetLocations()->InAt(1).GetConstant()));
1720        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
1721        __ Fcmp(left, 0.0);
1722      } else {
1723        __ Fcmp(left, InputFPRegisterAt(compare, 1));
1724      }
1725      if (compare->IsGtBias()) {
1726        __ Cset(result, ne);
1727      } else {
1728        __ Csetm(result, ne);
1729      }
1730      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
1731      break;
1732    }
1733    default:
1734      LOG(FATAL) << "Unimplemented compare type " << in_type;
1735  }
1736}
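
// Worked example for the FP paths above (a sketch): after `fcmp`, an
// unordered (NaN) result sets N=0 and Z=0, so `ne` holds while `mi` and `gt`
// do not. Hence:
//   gt bias:  cset  w0, ne  -> 1;   cneg(.., mi) does not fire; NaN yields  1
//   lt bias:  csetm w0, ne  -> -1;  cneg(.., gt) does not fire; NaN yields -1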
1737
1738void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
1739  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1740
1741  if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
1742    locations->SetInAt(0, Location::RequiresFpuRegister());
1743    locations->SetInAt(1,
1744                       IsFloatingPointZeroConstant(instruction->InputAt(1))
1745                           ? Location::ConstantLocation(instruction->InputAt(1)->AsConstant())
1746                           : Location::RequiresFpuRegister());
1747  } else {
1748    // Integer cases.
1749    locations->SetInAt(0, Location::RequiresRegister());
1750    locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1751  }
1752
1753  if (instruction->NeedsMaterialization()) {
1754    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1755  }
1756}
1757
1758void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
1759  if (!instruction->NeedsMaterialization()) {
1760    return;
1761  }
1762
1763  LocationSummary* locations = instruction->GetLocations();
1764  Register res = RegisterFrom(locations->Out(), instruction->GetType());
1765  IfCondition if_cond = instruction->GetCondition();
1766  Condition arm64_cond = ARM64Condition(if_cond);
1767
1768  if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
1769    FPRegister lhs = InputFPRegisterAt(instruction, 0);
1770    if (locations->InAt(1).IsConstant()) {
1771      DCHECK(IsFloatingPointZeroConstant(locations->InAt(1).GetConstant()));
1772      // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
1773      __ Fcmp(lhs, 0.0);
1774    } else {
1775      __ Fcmp(lhs, InputFPRegisterAt(instruction, 1));
1776    }
1777    __ Cset(res, arm64_cond);
1778    if (instruction->IsFPConditionTrueIfNaN()) {
1779      // res = IsUnordered(arm64_cond) ? 1 : res  <=>  res = IsNotUnordered(arm64_cond) ? res : 1
1780      __ Csel(res, res, Operand(1), vc);  // VC for "not unordered".
1781    } else if (instruction->IsFPConditionFalseIfNaN()) {
1782      // res = IsUnordered(arm64_cond) ? 0 : res  <=>  res = IsNotUnordered(arm64_cond) ? res : 0
1783      __ Csel(res, res, Operand(0), vc);  // VC for "not unordered".
1784    }
1785  } else {
1786    // Integer cases.
1787    Register lhs = InputRegisterAt(instruction, 0);
1788    Operand rhs = InputOperandAt(instruction, 1);
1789    __ Cmp(lhs, rhs);
1790    __ Cset(res, arm64_cond);
1791  }
1792}
1793
1794#define FOR_EACH_CONDITION_INSTRUCTION(M)                                                \
1795  M(Equal)                                                                               \
1796  M(NotEqual)                                                                            \
1797  M(LessThan)                                                                            \
1798  M(LessThanOrEqual)                                                                     \
1799  M(GreaterThan)                                                                         \
1800  M(GreaterThanOrEqual)
1801#define DEFINE_CONDITION_VISITORS(Name)                                                  \
1802void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
1803void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
1804FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
1805#undef DEFINE_CONDITION_VISITORS
1806#undef FOR_EACH_CONDITION_INSTRUCTION
1807
1808void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
1809  DCHECK(instruction->IsDiv() || instruction->IsRem());
1810
1811  LocationSummary* locations = instruction->GetLocations();
1812  Location second = locations->InAt(1);
1813  DCHECK(second.IsConstant());
1814
1815  Register out = OutputRegister(instruction);
1816  Register dividend = InputRegisterAt(instruction, 0);
1817  int64_t imm = Int64FromConstant(second.GetConstant());
1818  DCHECK(imm == 1 || imm == -1);
1819
1820  if (instruction->IsRem()) {
1821    __ Mov(out, 0);
1822  } else {
1823    if (imm == 1) {
1824      __ Mov(out, dividend);
1825    } else {
1826      __ Neg(out, dividend);
1827    }
1828  }
1829}
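
// Concretely: x % 1 and x % -1 are always 0 (`mov out, #0`), x / 1 is x
// (`mov out, dividend`) and x / -1 is -x (`neg out, dividend`), so no SDIV
// is needed.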
1830
1831void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
1832  DCHECK(instruction->IsDiv() || instruction->IsRem());
1833
1834  LocationSummary* locations = instruction->GetLocations();
1835  Location second = locations->InAt(1);
1836  DCHECK(second.IsConstant());
1837
1838  Register out = OutputRegister(instruction);
1839  Register dividend = InputRegisterAt(instruction, 0);
1840  int64_t imm = Int64FromConstant(second.GetConstant());
1841  uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
1842  DCHECK(IsPowerOfTwo(abs_imm));
1843  int ctz_imm = CTZ(abs_imm);
1844
1845  UseScratchRegisterScope temps(GetVIXLAssembler());
1846  Register temp = temps.AcquireSameSizeAs(out);
1847
1848  if (instruction->IsDiv()) {
1849    __ Add(temp, dividend, abs_imm - 1);
1850    __ Cmp(dividend, 0);
1851    __ Csel(out, temp, dividend, lt);
1852    if (imm > 0) {
1853      __ Asr(out, out, ctz_imm);
1854    } else {
1855      __ Neg(out, Operand(out, ASR, ctz_imm));
1856    }
1857  } else {
1858    int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64;
1859    __ Asr(temp, dividend, bits - 1);
1860    __ Lsr(temp, temp, bits - ctz_imm);
1861    __ Add(out, dividend, temp);
1862    __ And(out, out, abs_imm - 1);
1863    __ Sub(out, out, temp);
1864  }
1865}
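
// Worked example (a sketch; registers are illustrative), int division by 8,
// i.e. ctz_imm == 3:
//   add  w16, w0, #7          // bias negative dividends by abs_imm - 1
//   cmp  w0, #0
//   csel w1, w16, w0, lt      // w1 = (w0 < 0) ? w0 + 7 : w0
//   asr  w1, w1, #3           // rounds toward zero, as Java requires
// The remainder path materializes the same bias branchlessly: `asr #31` then
// `lsr #29` yields 7 for a negative dividend and 0 otherwise.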
1866
1867void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
1868  DCHECK(instruction->IsDiv() || instruction->IsRem());
1869
1870  LocationSummary* locations = instruction->GetLocations();
1871  Location second = locations->InAt(1);
1872  DCHECK(second.IsConstant());
1873
1874  Register out = OutputRegister(instruction);
1875  Register dividend = InputRegisterAt(instruction, 0);
1876  int64_t imm = Int64FromConstant(second.GetConstant());
1877
1878  Primitive::Type type = instruction->GetResultType();
1879  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1880
1881  int64_t magic;
1882  int shift;
1883  CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift);
1884
1885  UseScratchRegisterScope temps(GetVIXLAssembler());
1886  Register temp = temps.AcquireSameSizeAs(out);
1887
1888  // temp = get_high(dividend * magic)
1889  __ Mov(temp, magic);
1890  if (type == Primitive::kPrimLong) {
1891    __ Smulh(temp, dividend, temp);
1892  } else {
1893    __ Smull(temp.X(), dividend, temp);
1894    __ Lsr(temp.X(), temp.X(), 32);
1895  }
1896
1897  if (imm > 0 && magic < 0) {
1898    __ Add(temp, temp, dividend);
1899  } else if (imm < 0 && magic > 0) {
1900    __ Sub(temp, temp, dividend);
1901  }
1902
1903  if (shift != 0) {
1904    __ Asr(temp, temp, shift);
1905  }
1906
1907  if (instruction->IsDiv()) {
1908    __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
1909  } else {
1910    __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
1911    // TODO: Strength reduction for msub.
1912    Register temp_imm = temps.AcquireSameSizeAs(out);
1913    __ Mov(temp_imm, imm);
1914    __ Msub(out, temp, temp_imm, dividend);
1915  }
1916}
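
// Worked example (a sketch; registers are illustrative), int division by 7
// with the standard Granlund-Montgomery constants magic = 0x92492493 and
// shift = 2:
//   mov   w16, #0x92492493        // magic (negative as an int32)
//   smull x16, w0, w16
//   lsr   x16, x16, #32           // temp = get_high(dividend * magic)
//   add   w16, w16, w0            // imm > 0 && magic < 0
//   asr   w16, w16, #2            // shift
//   sub   w1, w16, w16, asr #31   // add one when the quotient is negative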
1917
1918void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
1919  DCHECK(instruction->IsDiv() || instruction->IsRem());
1920  Primitive::Type type = instruction->GetResultType();
1921  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1922
1923  LocationSummary* locations = instruction->GetLocations();
1924  Register out = OutputRegister(instruction);
1925  Location second = locations->InAt(1);
1926
1927  if (second.IsConstant()) {
1928    int64_t imm = Int64FromConstant(second.GetConstant());
1929
1930    if (imm == 0) {
1931      // Do not generate anything. DivZeroCheck would prevent any code to be executed.
1932    } else if (imm == 1 || imm == -1) {
1933      DivRemOneOrMinusOne(instruction);
1934    } else if (IsPowerOfTwo(std::abs(imm))) {
1935      DivRemByPowerOfTwo(instruction);
1936    } else {
1937      DCHECK(imm <= -2 || imm >= 2);
1938      GenerateDivRemWithAnyConstant(instruction);
1939    }
1940  } else {
1941    Register dividend = InputRegisterAt(instruction, 0);
1942    Register divisor = InputRegisterAt(instruction, 1);
1943    if (instruction->IsDiv()) {
1944      __ Sdiv(out, dividend, divisor);
1945    } else {
1946      UseScratchRegisterScope temps(GetVIXLAssembler());
1947      Register temp = temps.AcquireSameSizeAs(out);
1948      __ Sdiv(temp, dividend, divisor);
1949      __ Msub(out, temp, divisor, dividend);
1950    }
1951  }
1952}
1953
1954void LocationsBuilderARM64::VisitDiv(HDiv* div) {
1955  LocationSummary* locations =
1956      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1957  switch (div->GetResultType()) {
1958    case Primitive::kPrimInt:
1959    case Primitive::kPrimLong:
1960      locations->SetInAt(0, Location::RequiresRegister());
1961      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
1962      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1963      break;
1964
1965    case Primitive::kPrimFloat:
1966    case Primitive::kPrimDouble:
1967      locations->SetInAt(0, Location::RequiresFpuRegister());
1968      locations->SetInAt(1, Location::RequiresFpuRegister());
1969      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1970      break;
1971
1972    default:
1973      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1974  }
1975}
1976
1977void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
1978  Primitive::Type type = div->GetResultType();
1979  switch (type) {
1980    case Primitive::kPrimInt:
1981    case Primitive::kPrimLong:
1982      GenerateDivRemIntegral(div);
1983      break;
1984
1985    case Primitive::kPrimFloat:
1986    case Primitive::kPrimDouble:
1987      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
1988      break;
1989
1990    default:
1991      LOG(FATAL) << "Unexpected div type " << type;
1992  }
1993}
1994
1995void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1996  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
1997      ? LocationSummary::kCallOnSlowPath
1998      : LocationSummary::kNoCall;
1999  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2000  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2001  if (instruction->HasUses()) {
2002    locations->SetOut(Location::SameAsFirstInput());
2003  }
2004}
2005
2006void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2007  SlowPathCodeARM64* slow_path =
2008      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
2009  codegen_->AddSlowPath(slow_path);
2010  Location value = instruction->GetLocations()->InAt(0);
2011
2012  Primitive::Type type = instruction->GetType();
2013
2014  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
2015    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
2016    return;
2017  }
2018
2019  if (value.IsConstant()) {
2020    int64_t divisor = Int64ConstantFrom(value);
2021    if (divisor == 0) {
2022      __ B(slow_path->GetEntryLabel());
2023    } else {
2024      // A division by a non-zero constant is valid. We don't need to perform
2025      // any check, so simply fall through.
2026    }
2027  } else {
2028    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
2029  }
2030}
2031
2032void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
2033  LocationSummary* locations =
2034      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2035  locations->SetOut(Location::ConstantLocation(constant));
2036}
2037
2038void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
2039  UNUSED(constant);
2040  // Will be generated at use site.
2041}
2042
2043void LocationsBuilderARM64::VisitExit(HExit* exit) {
2044  exit->SetLocations(nullptr);
2045}
2046
2047void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
2048  UNUSED(exit);
2049}
2050
2051void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
2052  LocationSummary* locations =
2053      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2054  locations->SetOut(Location::ConstantLocation(constant));
2055}
2056
2057void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
2058  UNUSED(constant);
2059  // Will be generated at use site.
2060}
2061
2062void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
2063  DCHECK(!successor->IsExitBlock());
2064  HBasicBlock* block = got->GetBlock();
2065  HInstruction* previous = got->GetPrevious();
2066  HLoopInformation* info = block->GetLoopInformation();
2067
2068  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
2069    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
2070    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
2071    return;
2072  }
2073  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
2074    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
2075  }
2076  if (!codegen_->GoesToNextBlock(block, successor)) {
2077    __ B(codegen_->GetLabelOf(successor));
2078  }
2079}
2080
2081void LocationsBuilderARM64::VisitGoto(HGoto* got) {
2082  got->SetLocations(nullptr);
2083}
2084
2085void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
2086  HandleGoto(got, got->GetSuccessor());
2087}
2088
2089void LocationsBuilderARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2090  try_boundary->SetLocations(nullptr);
2091}
2092
2093void InstructionCodeGeneratorARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2094  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
2095  if (!successor->IsExitBlock()) {
2096    HandleGoto(try_boundary, successor);
2097  }
2098}
2099
2100void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
2101                                                          vixl::Label* true_target,
2102                                                          vixl::Label* false_target,
2103                                                          vixl::Label* always_true_target) {
2104  HInstruction* cond = instruction->InputAt(0);
2105  HCondition* condition = cond->AsCondition();
2106
2107  if (cond->IsIntConstant()) {
2108    int32_t cond_value = cond->AsIntConstant()->GetValue();
2109    if (cond_value == 1) {
2110      if (always_true_target != nullptr) {
2111        __ B(always_true_target);
2112      }
2113      return;
2114    } else {
2115      DCHECK_EQ(cond_value, 0);
2116    }
2117  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
2118    // The condition instruction has been materialized, compare the output to 0.
2119    Location cond_val = instruction->GetLocations()->InAt(0);
2120    DCHECK(cond_val.IsRegister());
2121    __ Cbnz(InputRegisterAt(instruction, 0), true_target);
2122  } else {
2123    // The condition instruction has not been materialized, use its inputs as
2124    // the comparison and its condition as the branch condition.
2125    Primitive::Type type =
2126        cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
2127
2128    if (Primitive::IsFloatingPointType(type)) {
2129      // The FP branch sequence below requires a non-null false target.
2130      if (false_target == nullptr) {
2131        false_target = codegen_->GetLabelOf(instruction->AsIf()->IfFalseSuccessor());
2132      }
2133      FPRegister lhs = InputFPRegisterAt(condition, 0);
2134      if (condition->GetLocations()->InAt(1).IsConstant()) {
2135        DCHECK(IsFloatingPointZeroConstant(condition->GetLocations()->InAt(1).GetConstant()));
2136        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
2137        __ Fcmp(lhs, 0.0);
2138      } else {
2139        __ Fcmp(lhs, InputFPRegisterAt(condition, 1));
2140      }
2141      if (condition->IsFPConditionTrueIfNaN()) {
2142        __ B(vs, true_target);  // VS for unordered.
2143      } else if (condition->IsFPConditionFalseIfNaN()) {
2144        __ B(vs, false_target);  // VS for unordered.
2145      }
2146      __ B(ARM64Condition(condition->GetCondition()), true_target);
2147    } else {
2148      // Integer cases.
2149      Register lhs = InputRegisterAt(condition, 0);
2150      Operand rhs = InputOperandAt(condition, 1);
2151      Condition arm64_cond = ARM64Condition(condition->GetCondition());
2152      if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
2153        switch (arm64_cond) {
2154          case eq:
2155            __ Cbz(lhs, true_target);
2156            break;
2157          case ne:
2158            __ Cbnz(lhs, true_target);
2159            break;
2160          case lt:
2161            // Test the sign bit and branch accordingly.
2162            __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
2163            break;
2164          case ge:
2165            // Test the sign bit and branch accordingly.
2166            __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
2167            break;
2168          default:
2169            // Without the `static_cast` the compiler throws an error for
2170            // `-Werror=sign-promo`.
2171            LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond);
2172        }
2173      } else {
2174        __ Cmp(lhs, rhs);
2175        __ B(arm64_cond, true_target);
2176      }
2177    }
2178  }
2179  if (false_target != nullptr) {
2180    __ B(false_target);
2181  }
2182}
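
// Summary of the compare-with-zero shortcuts above (w1 is an example
// register):
//   x == 0  ->  cbz  w1, true_target
//   x != 0  ->  cbnz w1, true_target
//   x <  0  ->  tbnz w1, #31, true_target   // sign bit set
//   x >= 0  ->  tbz  w1, #31, true_target   // sign bit clear
// `gt` and `le` against zero still need a full CMP, since they depend on
// more than the sign bit.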
2183
2184void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
2185  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2186  HInstruction* cond = if_instr->InputAt(0);
2187  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2188    locations->SetInAt(0, Location::RequiresRegister());
2189  }
2190}
2191
2192void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
2193  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2194  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2195  vixl::Label* always_true_target = true_target;
2196  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2197                                if_instr->IfTrueSuccessor())) {
2198    always_true_target = nullptr;
2199  }
2200  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2201                                if_instr->IfFalseSuccessor())) {
2202    false_target = nullptr;
2203  }
2204  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2205}
2206
2207void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2208  LocationSummary* locations = new (GetGraph()->GetArena())
2209      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2210  HInstruction* cond = deoptimize->InputAt(0);
2211  DCHECK(cond->IsCondition());
2212  if (cond->AsCondition()->NeedsMaterialization()) {
2213    locations->SetInAt(0, Location::RequiresRegister());
2214  }
2215}
2216
2217void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2218  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
2219      DeoptimizationSlowPathARM64(deoptimize);
2220  codegen_->AddSlowPath(slow_path);
2221  vixl::Label* slow_path_entry = slow_path->GetEntryLabel();
2222  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2223}
2224
2225void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2226  HandleFieldGet(instruction);
2227}
2228
2229void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2230  HandleFieldGet(instruction, instruction->GetFieldInfo());
2231}
2232
2233void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2234  HandleFieldSet(instruction);
2235}
2236
2237void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2238  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
2239}
2240
2241void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
2242  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2243  switch (instruction->GetTypeCheckKind()) {
2244    case TypeCheckKind::kExactCheck:
2245    case TypeCheckKind::kAbstractClassCheck:
2246    case TypeCheckKind::kClassHierarchyCheck:
2247    case TypeCheckKind::kArrayObjectCheck:
2248      call_kind = LocationSummary::kNoCall;
2249      break;
2250    case TypeCheckKind::kInterfaceCheck:
2251      call_kind = LocationSummary::kCall;
2252      break;
2253    case TypeCheckKind::kArrayCheck:
2254      call_kind = LocationSummary::kCallOnSlowPath;
2255      break;
2256  }
2257  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2258  if (call_kind != LocationSummary::kCall) {
2259    locations->SetInAt(0, Location::RequiresRegister());
2260    locations->SetInAt(1, Location::RequiresRegister());
2261    // The out register is used as a temporary, so it overlaps with the inputs.
2262    // Note that TypeCheckSlowPathARM64 uses this register too.
2263    locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2264  } else {
2265    InvokeRuntimeCallingConvention calling_convention;
2266    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0)));
2267    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2268    locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
2269  }
2270}
2271
2272void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
2273  LocationSummary* locations = instruction->GetLocations();
2274  Register obj = InputRegisterAt(instruction, 0);
2275  Register cls = InputRegisterAt(instruction, 1);
2276  Register out = OutputRegister(instruction);
2277  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2278  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2279  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2280  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
2281
2282  vixl::Label done, zero;
2283  SlowPathCodeARM64* slow_path = nullptr;
2284
2285  // Return 0 if `obj` is null.
2286  // Avoid null check if we know `obj` is not null.
2287  if (instruction->MustDoNullCheck()) {
2288    __ Cbz(obj, &zero);
2289  }
2290
2291  // In case of an interface check, we put the object class into the object register.
2292  // This is safe, as the register is caller-save, and the object must be in another
2293  // register if it survives the runtime call.
2294  Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
2295      ? obj
2296      : out;
2297  __ Ldr(target, HeapOperand(obj.W(), class_offset));
2298  GetAssembler()->MaybeUnpoisonHeapReference(target);
2299
2300  switch (instruction->GetTypeCheckKind()) {
2301    case TypeCheckKind::kExactCheck: {
2302      __ Cmp(out, cls);
2303      __ Cset(out, eq);
2304      if (zero.IsLinked()) {
2305        __ B(&done);
2306      }
2307      break;
2308    }
2309    case TypeCheckKind::kAbstractClassCheck: {
2310      // If the class is abstract, we eagerly fetch the super class of the
2311      // object to avoid doing a comparison we know will fail.
2312      vixl::Label loop;
2313      __ Bind(&loop);
2314      __ Ldr(out, HeapOperand(out, super_offset));
2315      GetAssembler()->MaybeUnpoisonHeapReference(out);
2316      // If `out` is null, we use it for the result, and jump to `done`.
2317      __ Cbz(out, &done);
2318      __ Cmp(out, cls);
2319      __ B(ne, &loop);
2320      __ Mov(out, 1);
2321      if (zero.IsLinked()) {
2322        __ B(&done);
2323      }
2324      break;
2325    }
2326    case TypeCheckKind::kClassHierarchyCheck: {
2327      // Walk over the class hierarchy to find a match.
2328      vixl::Label loop, success;
2329      __ Bind(&loop);
2330      __ Cmp(out, cls);
2331      __ B(eq, &success);
2332      __ Ldr(out, HeapOperand(out, super_offset));
2333      GetAssembler()->MaybeUnpoisonHeapReference(out);
2334      __ Cbnz(out, &loop);
2335      // If `out` is null, we use it for the result, and jump to `done`.
2336      __ B(&done);
2337      __ Bind(&success);
2338      __ Mov(out, 1);
2339      if (zero.IsLinked()) {
2340        __ B(&done);
2341      }
2342      break;
2343    }
2344    case TypeCheckKind::kArrayObjectCheck: {
2345      // Just need to check that the object's class is a non-primitive array.
2346      __ Ldr(out, HeapOperand(out, component_offset));
2347      GetAssembler()->MaybeUnpoisonHeapReference(out);
2348      // If `out` is null, we use it for the result, and jump to `done`.
2349      __ Cbz(out, &done);
2350      __ Ldrh(out, HeapOperand(out, primitive_offset));
2351      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
2352      __ Cbnz(out, &zero);
2353      __ Mov(out, 1);
2354      __ B(&done);
2355      break;
2356    }
2357    case TypeCheckKind::kArrayCheck: {
2358      __ Cmp(out, cls);
2359      DCHECK(locations->OnlyCallsOnSlowPath());
2360      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
2361          instruction, /* is_fatal */ false);
2362      codegen_->AddSlowPath(slow_path);
2363      __ B(ne, slow_path->GetEntryLabel());
2364      __ Mov(out, 1);
2365      if (zero.IsLinked()) {
2366        __ B(&done);
2367      }
2368      break;
2369    }
2370
2371    case TypeCheckKind::kInterfaceCheck:
2372    default: {
2373      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
2374                              instruction,
2375                              instruction->GetDexPc(),
2376                              nullptr);
2377      if (zero.IsLinked()) {
2378        __ B(&done);
2379      }
2380      break;
2381    }
2382  }
2383
2384  if (zero.IsLinked()) {
2385    __ Bind(&zero);
2386    __ Mov(out, 0);
2387  }
2388
2389  if (done.IsLinked()) {
2390    __ Bind(&done);
2391  }
2392
2393  if (slow_path != nullptr) {
2394    __ Bind(slow_path->GetExitLabel());
2395  }
2396}
2397
2398void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
2399  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2400  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
2401
2402  switch (instruction->GetTypeCheckKind()) {
2403    case TypeCheckKind::kExactCheck:
2404    case TypeCheckKind::kAbstractClassCheck:
2405    case TypeCheckKind::kClassHierarchyCheck:
2406    case TypeCheckKind::kArrayObjectCheck:
2407      call_kind = throws_into_catch
2408          ? LocationSummary::kCallOnSlowPath
2409          : LocationSummary::kNoCall;
2410      break;
2411    case TypeCheckKind::kInterfaceCheck:
2412      call_kind = LocationSummary::kCall;
2413      break;
2414    case TypeCheckKind::kArrayCheck:
2415      call_kind = LocationSummary::kCallOnSlowPath;
2416      break;
2417  }
2418
2419  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
2420      instruction, call_kind);
2421  if (call_kind != LocationSummary::kCall) {
2422    locations->SetInAt(0, Location::RequiresRegister());
2423    locations->SetInAt(1, Location::RequiresRegister());
2424    // Note that TypeCheckSlowPathARM64 uses this register too.
2425    locations->AddTemp(Location::RequiresRegister());
2426  } else {
2427    InvokeRuntimeCallingConvention calling_convention;
2428    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(0)));
2429    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2430  }
2431}
2432
2433void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
2434  LocationSummary* locations = instruction->GetLocations();
2435  Register obj = InputRegisterAt(instruction, 0);
2436  Register cls = InputRegisterAt(instruction, 1);
2437  Register temp;
2438  if (!locations->WillCall()) {
2439    temp = WRegisterFrom(locations->GetTemp(0));
2440  }
2441
2442  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2443  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2444  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2445  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
2446  SlowPathCodeARM64* slow_path = nullptr;
2447
2448  if (!locations->WillCall()) {
2449    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
2450        instruction, !locations->CanCall());
2451    codegen_->AddSlowPath(slow_path);
2452  }
2453
2454  vixl::Label done;
2455  // Avoid null check if we know obj is not null.
2456  if (instruction->MustDoNullCheck()) {
2457    __ Cbz(obj, &done);
2458  }
2459
2460  if (locations->WillCall()) {
2461    __ Ldr(obj, HeapOperand(obj, class_offset));
2462    GetAssembler()->MaybeUnpoisonHeapReference(obj);
2463  } else {
2464    __ Ldr(temp, HeapOperand(obj, class_offset));
2465    GetAssembler()->MaybeUnpoisonHeapReference(temp);
2466  }
2467
2468  switch (instruction->GetTypeCheckKind()) {
2469    case TypeCheckKind::kExactCheck:
2470    case TypeCheckKind::kArrayCheck: {
2471      __ Cmp(temp, cls);
2472      // Jump to slow path for throwing the exception or doing a
2473      // more involved array check.
2474      __ B(ne, slow_path->GetEntryLabel());
2475      break;
2476    }
2477    case TypeCheckKind::kAbstractClassCheck: {
2478      // If the class is abstract, we eagerly fetch the super class of the
2479      // object to avoid doing a comparison we know will fail.
2480      vixl::Label loop;
2481      __ Bind(&loop);
2482      __ Ldr(temp, HeapOperand(temp, super_offset));
2483      GetAssembler()->MaybeUnpoisonHeapReference(temp);
2484      // Jump to the slow path to throw the exception.
2485      __ Cbz(temp, slow_path->GetEntryLabel());
2486      __ Cmp(temp, cls);
2487      __ B(ne, &loop);
2488      break;
2489    }
2490    case TypeCheckKind::kClassHierarchyCheck: {
2491      // Walk over the class hierarchy to find a match.
2492      vixl::Label loop, success;
2493      __ Bind(&loop);
2494      __ Cmp(temp, cls);
2495      __ B(eq, &success);
2496      __ Ldr(temp, HeapOperand(temp, super_offset));
2497      GetAssembler()->MaybeUnpoisonHeapReference(temp);
2498      __ Cbnz(temp, &loop);
2499      // Jump to the slow path to throw the exception.
2500      __ B(slow_path->GetEntryLabel());
2501      __ Bind(&success);
2502      break;
2503    }
2504    case TypeCheckKind::kArrayObjectCheck: {
2505      // Just need to check that the object's class is a non-primitive array.
2506      __ Ldr(temp, HeapOperand(temp, component_offset));
2507      GetAssembler()->MaybeUnpoisonHeapReference(temp);
2508      __ Cbz(temp, slow_path->GetEntryLabel());
2509      __ Ldrh(temp, HeapOperand(temp, primitive_offset));
2510      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
2511      __ Cbnz(temp, slow_path->GetEntryLabel());
2512      break;
2513    }
2514    case TypeCheckKind::kInterfaceCheck:
2515    default:
2516      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
2517                              instruction,
2518                              instruction->GetDexPc(),
2519                              nullptr);
2520      break;
2521  }
2522  __ Bind(&done);
2523
2524  if (slow_path != nullptr) {
2525    __ Bind(slow_path->GetExitLabel());
2526  }
2527}
2528
2529void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
2530  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2531  locations->SetOut(Location::ConstantLocation(constant));
2532}
2533
2534void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
2535  // Will be generated at use site.
2536  UNUSED(constant);
2537}
2538
2539void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
2540  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2541  locations->SetOut(Location::ConstantLocation(constant));
2542}
2543
2544void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
2545  // Will be generated at use site.
2546  UNUSED(constant);
2547}
2548
2549void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2550  // The trampoline uses the same calling convention as the dex calling convention,
2551  // except that instead of loading arg0/x0 with the target Method*, arg0/x0 will
2552  // contain the method_idx.
2553  HandleInvoke(invoke);
2554}
2555
2556void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2557  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
2558}
2559
2560void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
2561  InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
2562  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2563}
2564
2565void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2566  HandleInvoke(invoke);
2567}
2568
2569void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2570  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2571  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
2572  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2573      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
2574  Location receiver = invoke->GetLocations()->InAt(0);
2575  Offset class_offset = mirror::Object::ClassOffset();
2576  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2577
2578  // The register ip1 is required to be used for the hidden argument in
2579  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
2580  MacroAssembler* masm = GetVIXLAssembler();
2581  UseScratchRegisterScope scratch_scope(masm);
2582  BlockPoolsScope block_pools(masm);
2583  scratch_scope.Exclude(ip1);
2584  __ Mov(ip1, invoke->GetDexMethodIndex());
2585
2586  // temp = object->GetClass();
2587  if (receiver.IsStackSlot()) {
2588    __ Ldr(temp.W(), StackOperandFrom(receiver));
2589    __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
2590  } else {
2591    __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
2592  }
2593  codegen_->MaybeRecordImplicitNullCheck(invoke);
2594  GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
2595  // temp = temp->GetImtEntryAt(method_offset);
2596  __ Ldr(temp, MemOperand(temp, method_offset));
2597  // lr = temp->GetEntryPoint();
2598  __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
2599  // lr();
2600  __ Blr(lr);
2601  DCHECK(!codegen_->IsLeafMethod());
2602  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2603}
2604
2605void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2606  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2607  if (intrinsic.TryDispatch(invoke)) {
2608    return;
2609  }
2610
2611  HandleInvoke(invoke);
2612}
2613
2614void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2615  // When we do not run baseline, explicit clinit checks triggered by static
2616  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2617  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2618
2619  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2620  if (intrinsic.TryDispatch(invoke)) {
2621    return;
2622  }
2623
2624  HandleInvoke(invoke);
2625}
2626
2627static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
2628  if (invoke->GetLocations()->Intrinsified()) {
2629    IntrinsicCodeGeneratorARM64 intrinsic(codegen);
2630    intrinsic.Dispatch(invoke);
2631    return true;
2632  }
2633  return false;
2634}
2635
2636void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2637  // For better instruction scheduling we load the direct code pointer before the method pointer.
2638  bool direct_code_loaded = false;
2639  switch (invoke->GetCodePtrLocation()) {
2640    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2641      // LR = code address from literal pool with link-time patch.
2642      __ Ldr(lr, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
2643      direct_code_loaded = true;
2644      break;
2645    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
2646      // LR = invoke->GetDirectCodePtr();
2647      __ Ldr(lr, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
2648      direct_code_loaded = true;
2649      break;
2650    default:
2651      break;
2652  }
2653
2654  // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
2655  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
2656  switch (invoke->GetMethodLoadKind()) {
2657    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
2658      // temp = thread->string_init_entrypoint
2659      __ Ldr(XRegisterFrom(temp).X(), MemOperand(tr, invoke->GetStringInitOffset()));
2660      break;
2661    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
2662      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2663      break;
2664    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
2665      // Load method address from literal pool.
2666      __ Ldr(XRegisterFrom(temp).X(), DeduplicateUint64Literal(invoke->GetMethodAddress()));
2667      break;
2668    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
2669      // Load method address from literal pool with a link-time patch.
2670      __ Ldr(XRegisterFrom(temp).X(),
2671             DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
2672      break;
2673    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
2674      // Add ADRP with its PC-relative DexCache access patch.
2675      pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
2676                                             invoke->GetDexCacheArrayOffset());
2677      vixl::Label* pc_insn_label = &pc_rel_dex_cache_patches_.back().label;
2678      {
2679        vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
2680        __ adrp(XRegisterFrom(temp).X(), 0);
2681      }
2682      __ Bind(pc_insn_label);  // Bind after ADRP.
2683      pc_rel_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
2684      // Add LDR with its PC-relative DexCache access patch.
2685      pc_rel_dex_cache_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
2686                                             invoke->GetDexCacheArrayOffset());
2687      __ Ldr(XRegisterFrom(temp).X(), MemOperand(XRegisterFrom(temp).X(), 0));
2688      __ Bind(&pc_rel_dex_cache_patches_.back().label);  // Bind after LDR.
2689      pc_rel_dex_cache_patches_.back().pc_insn_label = pc_insn_label;
2690      break;
2691    }
2692    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
2693      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2694      Register reg = XRegisterFrom(temp);
2695      Register method_reg;
2696      if (current_method.IsRegister()) {
2697        method_reg = XRegisterFrom(current_method);
2698      } else {
2699        DCHECK(invoke->GetLocations()->Intrinsified());
2700        DCHECK(!current_method.IsValid());
2701        method_reg = reg;
2702        __ Ldr(reg.X(), MemOperand(sp, kCurrentMethodStackOffset));
2703      }
2704
2705      // temp = current_method->dex_cache_resolved_methods_;
2706      __ Ldr(reg.X(),
2707             MemOperand(method_reg.X(),
2708                        ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
2709      // temp = temp[index_in_cache];
2710      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
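          // GetCachePointerOffset() presumably scales the method index by the
          // target pointer size, i.e. roughly index_in_cache * kArm64WordSize.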
2711      __ Ldr(reg.X(), MemOperand(reg.X(), GetCachePointerOffset(index_in_cache)));
2712      break;
2713    }
2714  }
2715
2716  switch (invoke->GetCodePtrLocation()) {
2717    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
2718      __ Bl(&frame_entry_label_);
2719      break;
2720    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
2721      relative_call_patches_.emplace_back(invoke->GetTargetMethod());
2722      vixl::Label* label = &relative_call_patches_.back().label;
2723      __ Bl(label);  // Placeholder: branch to the next instruction; overridden at link time.
2724      __ Bind(label);  // Bind after BL.
2725      break;
2726    }
2727    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2728    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
2729      // LR prepared above for better instruction scheduling.
2730      DCHECK(direct_code_loaded);
2731      // lr()
2732      __ Blr(lr);
2733      break;
2734    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
2735      // LR = callee_method->entry_point_from_quick_compiled_code_;
2736      __ Ldr(lr, MemOperand(
2737          XRegisterFrom(callee_method).X(),
2738          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
2739      // lr()
2740      __ Blr(lr);
2741      break;
2742  }
2743
2744  DCHECK(!IsLeafMethod());
2745}
2746
2747void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
2748  LocationSummary* locations = invoke->GetLocations();
2749  Location receiver = locations->InAt(0);
2750  Register temp = XRegisterFrom(temp_in);
2751  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2752      invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
2753  Offset class_offset = mirror::Object::ClassOffset();
2754  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
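      // Illustrative shape of the dispatch emitted below (registers and
      // offsets stand in for the values computed above; an unpoison step is
      // inserted after the first load when heap poisoning is enabled):
      //   ldr wT, [receiver, #class_offset]   // temp = receiver->klass_
      //   ldr xT, [xT, #method_offset]        // temp = vtable entry
      //   ldr lr, [xT, #entry_point_offset]   // lr = quick code entry point
      //   blr lr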
2755
2756  BlockPoolsScope block_pools(GetVIXLAssembler());
2757
2758  DCHECK(receiver.IsRegister());
2759  __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
2760  MaybeRecordImplicitNullCheck(invoke);
2761  GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
2762  // temp = temp->GetMethodAt(method_offset);
2763  __ Ldr(temp, MemOperand(temp, method_offset));
2764  // lr = temp->GetEntryPoint();
2765  __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
2766  // lr();
2767  __ Blr(lr);
2768}
2769
2770void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
2771  DCHECK(linker_patches->empty());
2772  size_t size =
2773      method_patches_.size() +
2774      call_patches_.size() +
2775      relative_call_patches_.size() +
2776      pc_rel_dex_cache_patches_.size();
2777  linker_patches->reserve(size);
2778  for (const auto& entry : method_patches_) {
2779    const MethodReference& target_method = entry.first;
2780    vixl::Literal<uint64_t>* literal = entry.second;
2781    linker_patches->push_back(LinkerPatch::MethodPatch(literal->offset(),
2782                                                       target_method.dex_file,
2783                                                       target_method.dex_method_index));
2784  }
2785  for (const auto& entry : call_patches_) {
2786    const MethodReference& target_method = entry.first;
2787    vixl::Literal<uint64_t>* literal = entry.second;
2788    linker_patches->push_back(LinkerPatch::CodePatch(literal->offset(),
2789                                                     target_method.dex_file,
2790                                                     target_method.dex_method_index));
2791  }
2792  for (const MethodPatchInfo<vixl::Label>& info : relative_call_patches_) {
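        // The label was bound immediately after the 4-byte BL, so the patch
        // location is the label's location minus one instruction.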
2793    linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.location() - 4u,
2794                                                             info.target_method.dex_file,
2795                                                             info.target_method.dex_method_index));
2796  }
2797  for (const PcRelativeDexCacheAccessInfo& info : pc_rel_dex_cache_patches_) {
2798    linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location() - 4u,
2799                                                              &info.target_dex_file,
2800                                                              info.pc_insn_label->location() - 4u,
2801                                                              info.element_offset));
2802  }
2803}
2804
2805vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
2806  // Look up the literal for value.
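      // lower_bound() plus key_comp() is a find-or-insert in one map walk: if
      // `value` is not less than the key at `lb`, the two keys must be equal.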
2807  auto lb = uint64_literals_.lower_bound(value);
2808  if (lb != uint64_literals_.end() && !uint64_literals_.key_comp()(value, lb->first)) {
2809    return lb->second;
2810  }
2811  // We don't have a literal for this value, insert a new one.
2812  vixl::Literal<uint64_t>* literal = __ CreateLiteralDestroyedWithPool<uint64_t>(value);
2813  uint64_literals_.PutBefore(lb, value, literal);
2814  return literal;
2815}
2816
2817vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
2818    MethodReference target_method,
2819    MethodToLiteralMap* map) {
2820  // Look up the literal for target_method.
2821  auto lb = map->lower_bound(target_method);
2822  if (lb != map->end() && !map->key_comp()(target_method, lb->first)) {
2823    return lb->second;
2824  }
2825  // We don't have a literal for this method yet, insert a new one.
2826  vixl::Literal<uint64_t>* literal = __ CreateLiteralDestroyedWithPool<uint64_t>(0u);
2827  map->PutBefore(lb, target_method, literal);
2828  return literal;
2829}
2830
2831vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
2832    MethodReference target_method) {
2833  return DeduplicateMethodLiteral(target_method, &method_patches_);
2834}
2835
2836vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
2837    MethodReference target_method) {
2838  return DeduplicateMethodLiteral(target_method, &call_patches_);
2839}
2840
2841
2842void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2843  // When we do not run baseline, explicit clinit checks triggered by static
2844  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2845  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2846
2847  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2848    return;
2849  }
2850
2851  BlockPoolsScope block_pools(GetVIXLAssembler());
2852  LocationSummary* locations = invoke->GetLocations();
2853  codegen_->GenerateStaticOrDirectCall(
2854      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
2855  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2856}
2857
2858void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2859  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2860    return;
2861  }
2862
2863  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
2864  DCHECK(!codegen_->IsLeafMethod());
2865  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2866}
2867
2868void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
2869  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2870                                                              : LocationSummary::kNoCall;
2871  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2872  locations->SetInAt(0, Location::RequiresRegister());
2873  locations->SetOut(Location::RequiresRegister());
2874}
2875
2876void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
2877  Register out = OutputRegister(cls);
2878  Register current_method = InputRegisterAt(cls, 0);
2879  if (cls->IsReferrersClass()) {
2880    DCHECK(!cls->CanCallRuntime());
2881    DCHECK(!cls->MustGenerateClinitCheck());
2882    __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
2883  } else {
2884    DCHECK(cls->CanCallRuntime());
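        // In effect: out = current_method->dex_cache_resolved_types_[type_index].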
2885    MemberOffset resolved_types_offset = ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
2886    __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
2887    __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
2888    // TODO: We will need a read barrier here.
2889
2890    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
2891        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
2892    codegen_->AddSlowPath(slow_path);
2893    __ Cbz(out, slow_path->GetEntryLabel());
2894    if (cls->MustGenerateClinitCheck()) {
2895      GenerateClassInitializationCheck(slow_path, out);
2896    } else {
2897      __ Bind(slow_path->GetExitLabel());
2898    }
2899  }
2900}
2901
2902static MemOperand GetExceptionTlsAddress() {
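      // `tr` is the reserved thread register, so the pending exception is read
      // and written at a fixed offset from the current Thread.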
2903  return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
2904}
2905
2906void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
2907  LocationSummary* locations =
2908      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2909  locations->SetOut(Location::RequiresRegister());
2910}
2911
2912void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
2913  __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress());
2914}
2915
2916void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
2917  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
2918}
2919
2920void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
2921  __ Str(wzr, GetExceptionTlsAddress());
2922}
2923
2924void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
2925  load->SetLocations(nullptr);
2926}
2927
2928void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
2929  // Nothing to do, this is driven by the code generator.
2930  UNUSED(load);
2931}
2932
2933void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
2934  LocationSummary* locations =
2935      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2936  locations->SetInAt(0, Location::RequiresRegister());
2937  locations->SetOut(Location::RequiresRegister());
2938}
2939
2940void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
2941  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
2942  codegen_->AddSlowPath(slow_path);
2943
2944  Register out = OutputRegister(load);
2945  Register current_method = InputRegisterAt(load, 0);
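      // In effect:
      //   out = current_method->declaring_class_->dex_cache_strings_[string_index]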
2946  __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
2947  __ Ldr(out.X(), HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
2948  __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())));
2949  // TODO: We will need a read barrier here.
2950  __ Cbz(out, slow_path->GetEntryLabel());
2951  __ Bind(slow_path->GetExitLabel());
2952}
2953
2954void LocationsBuilderARM64::VisitLocal(HLocal* local) {
2955  local->SetLocations(nullptr);
2956}
2957
2958void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
2959  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2960}
2961
2962void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
2963  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2964  locations->SetOut(Location::ConstantLocation(constant));
2965}
2966
2967void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
2968  // Will be generated at use site.
2969  UNUSED(constant);
2970}
2971
2972void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2973  LocationSummary* locations =
2974      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2975  InvokeRuntimeCallingConvention calling_convention;
2976  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2977}
2978
2979void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2980  codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject)
2981                                                 : QUICK_ENTRY_POINT(pUnlockObject),
2982                          instruction,
2983                          instruction->GetDexPc(),
2984                          nullptr);
2985  if (instruction->IsEnter()) {
        CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
      } else {
        CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
      }
2986}
2987
2988void LocationsBuilderARM64::VisitMul(HMul* mul) {
2989  LocationSummary* locations =
2990      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2991  switch (mul->GetResultType()) {
2992    case Primitive::kPrimInt:
2993    case Primitive::kPrimLong:
2994      locations->SetInAt(0, Location::RequiresRegister());
2995      locations->SetInAt(1, Location::RequiresRegister());
2996      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2997      break;
2998
2999    case Primitive::kPrimFloat:
3000    case Primitive::kPrimDouble:
3001      locations->SetInAt(0, Location::RequiresFpuRegister());
3002      locations->SetInAt(1, Location::RequiresFpuRegister());
3003      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3004      break;
3005
3006    default:
3007      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
3008  }
3009}
3010
3011void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
3012  switch (mul->GetResultType()) {
3013    case Primitive::kPrimInt:
3014    case Primitive::kPrimLong:
3015      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
3016      break;
3017
3018    case Primitive::kPrimFloat:
3019    case Primitive::kPrimDouble:
3020      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
3021      break;
3022
3023    default:
3024      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
3025  }
3026}
3027
3028void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
3029  LocationSummary* locations =
3030      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
3031  switch (neg->GetResultType()) {
3032    case Primitive::kPrimInt:
3033    case Primitive::kPrimLong:
3034      locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
3035      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3036      break;
3037
3038    case Primitive::kPrimFloat:
3039    case Primitive::kPrimDouble:
3040      locations->SetInAt(0, Location::RequiresFpuRegister());
3041      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3042      break;
3043
3044    default:
3045      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
3046  }
3047}
3048
3049void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
3050  switch (neg->GetResultType()) {
3051    case Primitive::kPrimInt:
3052    case Primitive::kPrimLong:
3053      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
3054      break;
3055
3056    case Primitive::kPrimFloat:
3057    case Primitive::kPrimDouble:
3058      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
3059      break;
3060
3061    default:
3062      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
3063  }
3064}
3065
3066void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
3067  LocationSummary* locations =
3068      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3069  InvokeRuntimeCallingConvention calling_convention;
3070  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
3071  locations->SetOut(LocationFrom(x0));
3072  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
3073  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
3074  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
3075                       void*, uint32_t, int32_t, ArtMethod*>();
3076}
3077
3078void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
3079  LocationSummary* locations = instruction->GetLocations();
3080  InvokeRuntimeCallingConvention calling_convention;
3081  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
3082  DCHECK(type_index.Is(w0));
3083  __ Mov(type_index, instruction->GetTypeIndex());
3084  // Note: if heap poisoning is enabled, the entry point takes care
3085  // of poisoning the reference.
3086  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3087                          instruction,
3088                          instruction->GetDexPc(),
3089                          nullptr);
3090  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
3091}
3092
3093void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
3094  LocationSummary* locations =
3095      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3096  InvokeRuntimeCallingConvention calling_convention;
3097  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
3098  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
3099  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
3100  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
3101}
3102
3103void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
3104  LocationSummary* locations = instruction->GetLocations();
3105  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
3106  DCHECK(type_index.Is(w0));
3107  __ Mov(type_index, instruction->GetTypeIndex());
3108  // Note: if heap poisoning is enabled, the entry point takes care
3109  // of poisoning the reference.
3110  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3111                          instruction,
3112                          instruction->GetDexPc(),
3113                          nullptr);
3114  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
3115}
3116
3117void LocationsBuilderARM64::VisitNot(HNot* instruction) {
3118  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3119  locations->SetInAt(0, Location::RequiresRegister());
3120  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3121}
3122
3123void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
3124  switch (instruction->GetResultType()) {
3125    case Primitive::kPrimInt:
3126    case Primitive::kPrimLong:
3127      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
3128      break;
3129
3130    default:
3131      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
3132  }
3133}
3134
3135void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
3136  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3137  locations->SetInAt(0, Location::RequiresRegister());
3138  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3139}
3140
3141void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
3142  __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
3143}
3144
3145void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
3146  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
3147      ? LocationSummary::kCallOnSlowPath
3148      : LocationSummary::kNoCall;
3149  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3150  locations->SetInAt(0, Location::RequiresRegister());
3151  if (instruction->HasUses()) {
3152    locations->SetOut(Location::SameAsFirstInput());
3153  }
3154}
3155
3156void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
3157  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3158    return;
3159  }
3160
3161  BlockPoolsScope block_pools(GetVIXLAssembler());
3162  Location obj = instruction->GetLocations()->InAt(0);
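      // The load below is emitted only for its side effect: wzr discards the
      // result, and if `obj` is null the access faults, letting the runtime's
      // fault handler turn the fault into a NullPointerException.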
3163  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
3164  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3165}
3166
3167void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
3168  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
3169  codegen_->AddSlowPath(slow_path);
3170
3171  LocationSummary* locations = instruction->GetLocations();
3172  Location obj = locations->InAt(0);
3173
3174  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
3175}
3176
3177void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
3178  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
3179    GenerateImplicitNullCheck(instruction);
3180  } else {
3181    GenerateExplicitNullCheck(instruction);
3182  }
3183}
3184
3185void LocationsBuilderARM64::VisitOr(HOr* instruction) {
3186  HandleBinaryOp(instruction);
3187}
3188
3189void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
3190  HandleBinaryOp(instruction);
3191}
3192
3193void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
3194  LOG(FATAL) << "Unreachable";
3195}
3196
3197void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
3198  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
3199}
3200
3201void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
3202  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3203  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
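      // Stack-passed arguments were addressed relative to the caller's frame;
      // adding the callee frame size rebases them onto this method's SP.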
3204  if (location.IsStackSlot()) {
3205    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3206  } else if (location.IsDoubleStackSlot()) {
3207    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3208  }
3209  locations->SetOut(location);
3210}
3211
3212void InstructionCodeGeneratorARM64::VisitParameterValue(
3213    HParameterValue* instruction ATTRIBUTE_UNUSED) {
3214  // Nothing to do, the parameter is already at its location.
3215}
3216
3217void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
3218  LocationSummary* locations =
3219      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3220  locations->SetOut(LocationFrom(kArtMethodRegister));
3221}
3222
3223void InstructionCodeGeneratorARM64::VisitCurrentMethod(
3224    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
3225  // Nothing to do, the method is already at its location.
3226}
3227
3228void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
3229  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3230  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
3231    locations->SetInAt(i, Location::Any());
3232  }
3233  locations->SetOut(Location::Any());
3234}
3235
3236void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
3237  UNUSED(instruction);
3238  LOG(FATAL) << "Unreachable";
3239}
3240
3241void LocationsBuilderARM64::VisitRem(HRem* rem) {
3242  Primitive::Type type = rem->GetResultType();
3243  LocationSummary::CallKind call_kind =
3244      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
3245  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
3246
3247  switch (type) {
3248    case Primitive::kPrimInt:
3249    case Primitive::kPrimLong:
3250      locations->SetInAt(0, Location::RequiresRegister());
3251      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
3252      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3253      break;
3254
3255    case Primitive::kPrimFloat:
3256    case Primitive::kPrimDouble: {
3257      InvokeRuntimeCallingConvention calling_convention;
3258      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
3259      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
3260      locations->SetOut(calling_convention.GetReturnLocation(type));
3261
3262      break;
3263    }
3264
3265    default:
3266      LOG(FATAL) << "Unexpected rem type " << type;
3267  }
3268}
3269
3270void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
3271  Primitive::Type type = rem->GetResultType();
3272
3273  switch (type) {
3274    case Primitive::kPrimInt:
3275    case Primitive::kPrimLong: {
3276      GenerateDivRemIntegral(rem);
3277      break;
3278    }
3279
3280    case Primitive::kPrimFloat:
3281    case Primitive::kPrimDouble: {
3282      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
3283                                                             : QUICK_ENTRY_POINT(pFmod);
3284      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
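          // A signature check in the style of the other runtime calls in this
          // file would presumably read:
          //   CheckEntrypointTypes<kQuickFmodf, float, float, float>();
          //   CheckEntrypointTypes<kQuickFmod, double, double, double>();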
3285      break;
3286    }
3287
3288    default:
3289      LOG(FATAL) << "Unexpected rem type " << type;
3290  }
3291}
3292
3293void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
3294  memory_barrier->SetLocations(nullptr);
3295}
3296
3297void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
3298  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
3299}
3300
3301void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
3302  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
3303  Primitive::Type return_type = instruction->InputAt(0)->GetType();
3304  locations->SetInAt(0, ARM64ReturnLocation(return_type));
3305}
3306
3307void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
3308  UNUSED(instruction);
3309  codegen_->GenerateFrameExit();
3310}
3311
3312void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
3313  instruction->SetLocations(nullptr);
3314}
3315
3316void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
3317  UNUSED(instruction);
3318  codegen_->GenerateFrameExit();
3319}
3320
3321void LocationsBuilderARM64::VisitShl(HShl* shl) {
3322  HandleShift(shl);
3323}
3324
3325void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
3326  HandleShift(shl);
3327}
3328
3329void LocationsBuilderARM64::VisitShr(HShr* shr) {
3330  HandleShift(shr);
3331}
3332
3333void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
3334  HandleShift(shr);
3335}
3336
3337void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
3338  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
3339  Primitive::Type field_type = store->InputAt(1)->GetType();
3340  switch (field_type) {
3341    case Primitive::kPrimNot:
3342    case Primitive::kPrimBoolean:
3343    case Primitive::kPrimByte:
3344    case Primitive::kPrimChar:
3345    case Primitive::kPrimShort:
3346    case Primitive::kPrimInt:
3347    case Primitive::kPrimFloat:
3348      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
3349      break;
3350
3351    case Primitive::kPrimLong:
3352    case Primitive::kPrimDouble:
3353      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
3354      break;
3355
3356    default:
3357      LOG(FATAL) << "Unimplemented local type " << field_type;
3358  }
3359}
3360
3361void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
3362  UNUSED(store);
3363}
3364
3365void LocationsBuilderARM64::VisitSub(HSub* instruction) {
3366  HandleBinaryOp(instruction);
3367}
3368
3369void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
3370  HandleBinaryOp(instruction);
3371}
3372
3373void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3374  HandleFieldGet(instruction);
3375}
3376
3377void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3378  HandleFieldGet(instruction, instruction->GetFieldInfo());
3379}
3380
3381void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3382  HandleFieldSet(instruction);
3383}
3384
3385void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3386  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3387}
3388
3389void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
3390  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
3391}
3392
3393void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
3394  HBasicBlock* block = instruction->GetBlock();
3395  if (block->GetLoopInformation() != nullptr) {
3396    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3397    // The back edge will generate the suspend check.
3398    return;
3399  }
3400  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3401    // The goto will generate the suspend check.
3402    return;
3403  }
3404  GenerateSuspendCheck(instruction, nullptr);
3405}
3406
3407void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
3408  temp->SetLocations(nullptr);
3409}
3410
3411void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
3412  // Nothing to do, this is driven by the code generator.
3413  UNUSED(temp);
3414}
3415
3416void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
3417  LocationSummary* locations =
3418      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3419  InvokeRuntimeCallingConvention calling_convention;
3420  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
3421}
3422
3423void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
3424  codegen_->InvokeRuntime(
3425      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
3426  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
3427}
3428
3429void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
3430  LocationSummary* locations =
3431      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
3432  Primitive::Type input_type = conversion->GetInputType();
3433  Primitive::Type result_type = conversion->GetResultType();
3434  DCHECK_NE(input_type, result_type);
3435  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
3436      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
3437    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
3438  }
3439
3440  if (Primitive::IsFloatingPointType(input_type)) {
3441    locations->SetInAt(0, Location::RequiresFpuRegister());
3442  } else {
3443    locations->SetInAt(0, Location::RequiresRegister());
3444  }
3445
3446  if (Primitive::IsFloatingPointType(result_type)) {
3447    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3448  } else {
3449    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3450  }
3451}
3452
3453void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
3454  Primitive::Type result_type = conversion->GetResultType();
3455  Primitive::Type input_type = conversion->GetInputType();
3456
3457  DCHECK_NE(input_type, result_type);
3458
3459  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
3460    int result_size = Primitive::ComponentSize(result_type);
3461    int input_size = Primitive::ComponentSize(input_type);
3462    int min_size = std::min(result_size, input_size);
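        // Example: long->short gives result_size = 2, input_size = 8 and
        // min_size = 2, so Sbfx below sign-extends the low 16 bits. Char is
        // unsigned, hence the zero-extending Ubfx special cases.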
3463    Register output = OutputRegister(conversion);
3464    Register source = InputRegisterAt(conversion, 0);
3465    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
3466      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
3467    } else if (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimLong) {
3468      // 'int' values are used directly as W registers, discarding the top
3469      // bits, so we don't need to sign-extend and can just perform a move.
3470      // We do not pass the `kDiscardForSameWReg` argument to force clearing the
3471      // top 32 bits of the target register. We theoretically could leave those
3472      // bits unchanged, but we would have to make sure that no code uses a
3473      // 32-bit input value as a 64-bit value assuming that the top 32 bits are
3474      // zero.
3475      __ Mov(output.W(), source.W());
3476    } else if ((result_type == Primitive::kPrimChar) ||
3477               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
3478      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
3479    } else {
3480      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
3481    }
3482  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
3483    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
3484  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
3485    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
3486    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
3487  } else if (Primitive::IsFloatingPointType(result_type) &&
3488             Primitive::IsFloatingPointType(input_type)) {
3489    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
3490  } else {
3491    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
3492                << " to " << result_type;
3493  }
3494}
3495
3496void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
3497  HandleShift(ushr);
3498}
3499
3500void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
3501  HandleShift(ushr);
3502}
3503
3504void LocationsBuilderARM64::VisitXor(HXor* instruction) {
3505  HandleBinaryOp(instruction);
3506}
3507
3508void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
3509  HandleBinaryOp(instruction);
3510}
3511
3512void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
3513  // Nothing to do, this should be removed during prepare for register allocator.
3514  UNUSED(instruction);
3515  LOG(FATAL) << "Unreachable";
3516}
3517
3518void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
3519  // Nothing to do, this should be removed during prepare for register allocator.
3520  UNUSED(instruction);
3521  LOG(FATAL) << "Unreachable";
3522}
3523
3524void LocationsBuilderARM64::VisitFakeString(HFakeString* instruction) {
3525  DCHECK(codegen_->IsBaseline());
3526  LocationSummary* locations =
3527      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3528  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
3529}
3530
3531void InstructionCodeGeneratorARM64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
3532  DCHECK(codegen_->IsBaseline());
3533  // Will be generated at use site.
3534}
3535
3536#undef __
3537#undef QUICK_ENTRY_POINT
3538
3539}  // namespace arm64
3540}  // namespace art
3541