code_generator_arm64.cc revision 76b1e1799a713a19218de26b171b0aef48a59e98
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "code_generator_utils.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

using namespace vixl;   // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
using helpers::ArtVixlRegCodeCoherentForRegSet;

static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return nv;  // Unreachable.
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  DCHECK_NE(return_type, Primitive::kPrimVoid);
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else {
    return LocationFrom(w0);
  }
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
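// With these macros, code in the slow paths below reads like assembly: `__`
// abbreviates the shared VIXL macro-assembler, and QUICK_ENTRY_POINT yields
// the byte offset of a quick runtime entrypoint inside the Thread object
// (InvokeRuntime below loads the entrypoint from that offset and branches to it).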

// Calculate the memory operand used to save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
                                           RegisterSet* register_set,
                                           int64_t spill_offset,
                                           bool is_save) {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
                                         codegen->GetNumberOfCoreRegisters(),
                                         register_set->GetFloatingPointRegisters(),
                                         codegen->GetNumberOfFloatingPointRegisters()));

  CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
      register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
  CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
      register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));

  MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
  UseScratchRegisterScope temps(masm);

  Register base = masm->StackPointer();
  int64_t core_spill_size = core_list.TotalSizeInBytes();
  int64_t fp_spill_size = fp_list.TotalSizeInBytes();
  int64_t reg_size = kXRegSizeInBytes;
  int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
  uint32_t ls_access_size = WhichPowerOf2(reg_size);
  if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
      !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
    // If the offset does not fit in the instruction's immediate field, use an alternate register
    // to compute the base address (the spill base address for the floating-point registers).
    Register new_base = temps.AcquireSameSizeAs(base);
    __ Add(new_base, base, Operand(spill_offset + core_spill_size));
    base = new_base;
    spill_offset = -core_spill_size;
    int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
    DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
    DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
  }
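  // Worked example for the rebasing block above (illustrative numbers, not from
  // the original source): with spill_offset 520 and two core plus two fp
  // registers live, core_spill_size and fp_spill_size are both 16, so
  // max_ls_pair_offset is 520 + 16 + 16 - 16 = 536, which exceeds the +504
  // limit of the scaled signed 7-bit STP/LDP pair immediate. Rebasing gives
  // new_base = sp + 536, the core pair is then addressed at offset -16 and the
  // fp pair at offset 0, both trivially encodable.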

  if (is_save) {
    __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
    __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  } else {
    __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
    __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  }
}

void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
      // If the register holds an object, update the stack mask.
      if (locations->RegisterContainsObject(i)) {
        locations->SetStackBit(stack_offset / kVRegSize);
      }
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_core_stack_offsets_[i] = stack_offset;
      stack_offset += kXRegSizeInBytes;
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
        register_set->ContainsFloatingPointRegister(i)) {
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_fpu_stack_offsets_[i] = stack_offset;
      stack_offset += kDRegSizeInBytes;
    }
  }

  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
}

void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
}

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
                                     HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
  return next_location;
}
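
// Illustrative sketch (assuming the managed calling convention declared in
// code_generator_arm64.h, where x1-x7 and d0-d7 hold arguments): for a method
// taking (long, double, int), successive GetNextLocation() calls yield x1, d0
// and x2 (used through its W view for the int), while stack_index_ advances by
// 2, 2 and 1 vreg slots so that GetStackOffsetOf() stays consistent for any
// arguments that overflow to the stack.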

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                                       const Arm64InstructionSetFeatures& isa_features,
                                       const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    callee_saved_core_registers.list(),
                    callee_saved_fp_registers.list(),
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(LocationFrom(lr));
}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
  // Note: There are 6 kinds of moves:
  // 1. constant -> GPR/FPR (non-cycle)
  // 2. constant -> stack (non-cycle)
  // 3. GPR/FPR -> GPR/FPR
  // 4. GPR/FPR -> stack
  // 5. stack -> GPR/FPR
  // 6. stack -> stack (non-cycle)
  // Cases 1, 2 and 6 should never be included in a dependency cycle on ARM64. For cases 3, 4
  // and 5 VIXL uses at most 1 GPR. VIXL has 2 GPR and 1 FPR temps, and there should be no
  // intersecting cycles on ARM64, so we always have 1 GPR and 1 FPR available as VIXL temps
  // to resolve the dependency.
  vixl_temps_.Open(GetVIXLAssembler());
}

void ParallelMoveResolverARM64::FinishEmitNativeCode() {
  vixl_temps_.Close();
}

Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
  DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
         kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
  kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
  Location scratch = GetScratchLocation(kind);
  if (!scratch.Equals(Location::NoLocation())) {
    return scratch;
  }
  // Allocate from VIXL temp registers.
  if (kind == Location::kRegister) {
    scratch = LocationFrom(vixl_temps_.AcquireX());
  } else {
    DCHECK(kind == Location::kFpuRegister);
    scratch = LocationFrom(vixl_temps_.AcquireD());
  }
  AddScratchLocation(scratch);
  return scratch;
}

void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
  if (loc.IsRegister()) {
    vixl_temps_.Release(XRegisterFrom(loc));
  } else {
    DCHECK(loc.IsFpuRegister());
    vixl_temps_.Release(DRegisterFrom(loc));
  }
  RemoveScratchLocation(loc);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    // Stack layout:
    //      sp[frame_size - 8]        : lr.
    //      ...                       : other preserved core registers.
    //      ...                       : other preserved fp registers.
    //      ...                       : reserved frame space.
    //      sp[0]                     : current method.
    __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
    GetAssembler()->cfi().AdjustCFAOffset(frame_size);
    GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
  }
}

void CodeGeneratorARM64::GenerateFrameExit() {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  GetAssembler()->cfi().RememberState();
  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
    GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    __ Drop(frame_size);
    GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
  }
  __ Ret();
  GetAssembler()->cfi().RestoreState();
  GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
  return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
                          core_spill_mask_);
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
                                         GetNumberOfFloatingPointRegisters()));
  return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
                          fpu_spill_mask_);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    int64_t value = GetInt64ValueOf(instruction->AsConstant());
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
          ? temps.AcquireW()
          : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();   // Index within the CardTable - 32bit.
  vixl::Label done;
  if (value_can_be_null) {
    __ Cbz(value, &done);
  }
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  if (value_can_be_null) {
    __ Bind(&done);
  }
}
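
// A note on MarkGCCard above (explanatory, not in the original source): `card`
// holds the card-table base loaded from the Thread object, and `temp` holds the
// object address shifted right by kCardShift, i.e. the card index. The Strb
// stores the least-significant byte of the base address itself at base[index];
// the runtime arranges for that byte to equal the "dirty card" value, which
// saves materializing a separate constant.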

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
  // Blocked core registers:
  //      lr        : Runtime reserved.
  //      tr        : Runtime reserved.
  //      xSuspend  : Runtime reserved. TODO: Unblock this when the runtime stops using it.
  //      ip1       : VIXL core temp.
  //      ip0       : VIXL core temp.
  //
  // Blocked fp registers:
  //      d31       : VIXL fp temp.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }

  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }

  if (is_baseline) {
    CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
    while (!reserved_core_baseline_registers.IsEmpty()) {
      blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
    }

    CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
    while (!reserved_fp_baseline_registers.IsEmpty()) {
      blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
    }
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << XRegister(reg);
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << DRegister(reg);
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant()) {
    __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
  } else if (constant->IsLongConstant()) {
    __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
  } else if (constant->IsNullConstant()) {
    __ Mov(Register(destination), 0);
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}

static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         // Null is mapped to a core W register, which we associate with kPrimInt.
         (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we chose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        DCHECK(destination.IsFpuRegister());
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  UseScratchRegisterScope temps(masm);
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
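      // Note: Ldarb (and Ldarh for the short case below) zero-extends, and
      // ARMv8 has no sign-extending load-acquire, so the Sbfx afterwards
      // re-creates the sign extension.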
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

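      // Stlr only accepts core registers, so the FP value takes a round trip
      // through a core temp of the matching width.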
      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
  DCHECK(RequiresCurrentMethod());
  DCHECK(current_method.IsW());
  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  BlockPoolsScope block_pools(GetVIXLAssembler());
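  // Quick entrypoints live at fixed offsets from the Thread register (tr), so a
  // runtime call is a load of the target from tr followed by an indirect branch;
  // lr is clobbered, which is fine since it is a blocked, runtime-reserved register.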
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  RecordPcInfo(instruction, dex_pc, slow_path);
  DCHECK(instruction->IsSuspendCheck()
         || instruction->IsBoundsCheck()
         || instruction->IsNullCheck()
         || instruction->IsDivZeroCheck()
         || !IsLeafMethod());
}

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  if (use_acquire_release) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
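      // kAnyStore must also order earlier loads before later stores, which a
      // store-store barrier (BarrierWrites) would not, hence BarrierAll here.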
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

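  // The thread flags half-word is non-zero when a suspend or checkpoint request
  // is pending, so a single Ldrh plus a compare-and-branch suffices here.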
1156  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
1157  if (successor == nullptr) {
1158    __ Cbnz(temp, slow_path->GetEntryLabel());
1159    __ Bind(slow_path->GetReturnLabel());
1160  } else {
1161    __ Cbz(temp, codegen_->GetLabelOf(successor));
1162    __ B(slow_path->GetEntryLabel());
1163    // slow_path will return to GetLabelOf(successor).
1164  }
1165}
1166
1167InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
1168                                                             CodeGeneratorARM64* codegen)
1169      : HGraphVisitor(graph),
1170        assembler_(codegen->GetAssembler()),
1171        codegen_(codegen) {}
1172
1173#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
1174  /* No unimplemented IR. */
1175
1176#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
1177
1178enum UnimplementedInstructionBreakCode {
1179  // Using a base helps identify when we hit such breakpoints.
1180  UnimplementedInstructionBreakCodeBaseCode = 0x900,
1181#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
1182  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
1183#undef ENUM_UNIMPLEMENTED_INSTRUCTION
1184};
1185
1186#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
1187  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
1188    UNUSED(instr);                                                                    \
1189    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
1190  }                                                                                   \
1191  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
1192    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
1193    locations->SetOut(Location::Any());                                               \
1194  }
1195  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
1196#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
1197
1198#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
1199#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
1200
1201void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
1202  DCHECK_EQ(instr->InputCount(), 2U);
1203  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1204  Primitive::Type type = instr->GetResultType();
1205  switch (type) {
1206    case Primitive::kPrimInt:
1207    case Primitive::kPrimLong:
1208      locations->SetInAt(0, Location::RequiresRegister());
1209      locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
1210      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1211      break;
1212
1213    case Primitive::kPrimFloat:
1214    case Primitive::kPrimDouble:
1215      locations->SetInAt(0, Location::RequiresFpuRegister());
1216      locations->SetInAt(1, Location::RequiresFpuRegister());
1217      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1218      break;
1219
1220    default:
1221      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
1222  }
1223}
1224
1225void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
1226  LocationSummary* locations =
1227      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1228  locations->SetInAt(0, Location::RequiresRegister());
1229  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1230    locations->SetOut(Location::RequiresFpuRegister());
1231  } else {
1232    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1233  }
1234}
1235
1236void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
1237                                                   const FieldInfo& field_info) {
1238  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
1239  BlockPoolsScope block_pools(GetVIXLAssembler());
1240
1241  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
1242  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
1243
1244  if (field_info.IsVolatile()) {
1245    if (use_acquire_release) {
1246      // NB: LoadAcquire will record the pc info if needed.
1247      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
1248    } else {
1249      codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field);
1250      codegen_->MaybeRecordImplicitNullCheck(instruction);
1251      // For IRIW sequential consistency kLoadAny is not sufficient.
1252      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1253    }
1254  } else {
1255    codegen_->Load(field_info.GetFieldType(), OutputCPURegister(instruction), field);
1256    codegen_->MaybeRecordImplicitNullCheck(instruction);
1257  }
1258}
1259
1260void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
1261  LocationSummary* locations =
1262      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1263  locations->SetInAt(0, Location::RequiresRegister());
1264  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
1265    locations->SetInAt(1, Location::RequiresFpuRegister());
1266  } else {
1267    locations->SetInAt(1, Location::RequiresRegister());
1268  }
1269}
1270
1271void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
1272                                                   const FieldInfo& field_info,
1273                                                   bool value_can_be_null) {
1274  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
1275  BlockPoolsScope block_pools(GetVIXLAssembler());
1276
1277  Register obj = InputRegisterAt(instruction, 0);
1278  CPURegister value = InputCPURegisterAt(instruction, 1);
1279  Offset offset = field_info.GetFieldOffset();
1280  Primitive::Type field_type = field_info.GetFieldType();
1281  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
1282
1283  if (field_info.IsVolatile()) {
1284    if (use_acquire_release) {
1285      codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
1286      codegen_->MaybeRecordImplicitNullCheck(instruction);
1287    } else {
1288      GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
1289      codegen_->Store(field_type, value, HeapOperand(obj, offset));
1290      codegen_->MaybeRecordImplicitNullCheck(instruction);
1291      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1292    }
1293  } else {
1294    codegen_->Store(field_type, value, HeapOperand(obj, offset));
1295    codegen_->MaybeRecordImplicitNullCheck(instruction);
1296  }
1297
1298  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
1299    codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
1300  }
1301}
1302
1303void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
1304  Primitive::Type type = instr->GetType();
1305
1306  switch (type) {
1307    case Primitive::kPrimInt:
1308    case Primitive::kPrimLong: {
1309      Register dst = OutputRegister(instr);
1310      Register lhs = InputRegisterAt(instr, 0);
1311      Operand rhs = InputOperandAt(instr, 1);
1312      if (instr->IsAdd()) {
1313        __ Add(dst, lhs, rhs);
1314      } else if (instr->IsAnd()) {
1315        __ And(dst, lhs, rhs);
1316      } else if (instr->IsOr()) {
1317        __ Orr(dst, lhs, rhs);
1318      } else if (instr->IsSub()) {
1319        __ Sub(dst, lhs, rhs);
1320      } else {
1321        DCHECK(instr->IsXor());
1322        __ Eor(dst, lhs, rhs);
1323      }
1324      break;
1325    }
1326    case Primitive::kPrimFloat:
1327    case Primitive::kPrimDouble: {
1328      FPRegister dst = OutputFPRegister(instr);
1329      FPRegister lhs = InputFPRegisterAt(instr, 0);
1330      FPRegister rhs = InputFPRegisterAt(instr, 1);
1331      if (instr->IsAdd()) {
1332        __ Fadd(dst, lhs, rhs);
1333      } else if (instr->IsSub()) {
1334        __ Fsub(dst, lhs, rhs);
1335      } else {
1336        LOG(FATAL) << "Unexpected floating-point binary operation";
1337      }
1338      break;
1339    }
1340    default:
1341      LOG(FATAL) << "Unexpected binary operation type " << type;
1342  }
1343}
1344
1345void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
1346  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1347
1348  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1349  Primitive::Type type = instr->GetResultType();
1350  switch (type) {
1351    case Primitive::kPrimInt:
1352    case Primitive::kPrimLong: {
1353      locations->SetInAt(0, Location::RequiresRegister());
1354      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1355      locations->SetOut(Location::RequiresRegister());
1356      break;
1357    }
1358    default:
1359      LOG(FATAL) << "Unexpected shift type " << type;
1360  }
1361}
1362
1363void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
1364  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1365
1366  Primitive::Type type = instr->GetType();
1367  switch (type) {
1368    case Primitive::kPrimInt:
1369    case Primitive::kPrimLong: {
1370      Register dst = OutputRegister(instr);
1371      Register lhs = InputRegisterAt(instr, 0);
1372      Operand rhs = InputOperandAt(instr, 1);
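            // Java shift semantics only use the low 5 (int) or 6 (long) bits
            // of the shift amount, so constant amounts are masked with
            // kMaxIntShiftValue / kMaxLongShiftValue before encoding.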
1373      if (rhs.IsImmediate()) {
1374        uint32_t shift_value = (type == Primitive::kPrimInt)
1375          ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
1376          : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
1377        if (instr->IsShl()) {
1378          __ Lsl(dst, lhs, shift_value);
1379        } else if (instr->IsShr()) {
1380          __ Asr(dst, lhs, shift_value);
1381        } else {
1382          __ Lsr(dst, lhs, shift_value);
1383        }
1384      } else {
1385        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1386
1387        if (instr->IsShl()) {
1388          __ Lsl(dst, lhs, rhs_reg);
1389        } else if (instr->IsShr()) {
1390          __ Asr(dst, lhs, rhs_reg);
1391        } else {
1392          __ Lsr(dst, lhs, rhs_reg);
1393        }
1394      }
1395      break;
1396    }
1397    default:
1398      LOG(FATAL) << "Unexpected shift operation type " << type;
1399  }
1400}
1401
1402void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1403  HandleBinaryOp(instruction);
1404}
1405
1406void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1407  HandleBinaryOp(instruction);
1408}
1409
1410void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1411  HandleBinaryOp(instruction);
1412}
1413
1414void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1415  HandleBinaryOp(instruction);
1416}
1417
1418void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
1419  LocationSummary* locations =
1420      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1421  locations->SetInAt(0, Location::RequiresRegister());
1422  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1423  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1424    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1425  } else {
1426    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1427  }
1428}
1429
1430void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
1431  LocationSummary* locations = instruction->GetLocations();
1432  Primitive::Type type = instruction->GetType();
1433  Register obj = InputRegisterAt(instruction, 0);
1434  Location index = locations->InAt(1);
1435  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
1436  MemOperand source = HeapOperand(obj);
1437  MacroAssembler* masm = GetVIXLAssembler();
1438  UseScratchRegisterScope temps(masm);
1439  BlockPoolsScope block_pools(masm);
1440
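        // A constant index folds into the load offset; a variable index needs
        // a scratch register to hold obj + (index << component_size_shift).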
1441  if (index.IsConstant()) {
1442    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
1443    source = HeapOperand(obj, offset);
1444  } else {
1445    Register temp = temps.AcquireSameSizeAs(obj);
1446    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
1447    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
1448    source = HeapOperand(temp, offset);
1449  }
1450
1451  codegen_->Load(type, OutputCPURegister(instruction), source);
1452  codegen_->MaybeRecordImplicitNullCheck(instruction);
1453}
1454
1455void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
1456  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1457  locations->SetInAt(0, Location::RequiresRegister());
1458  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1459}
1460
1461void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
1462  BlockPoolsScope block_pools(GetVIXLAssembler());
1463  __ Ldr(OutputRegister(instruction),
1464         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
1465  codegen_->MaybeRecordImplicitNullCheck(instruction);
1466}
1467
1468void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
1469  if (instruction->NeedsTypeCheck()) {
1470    LocationSummary* locations =
1471        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
1472    InvokeRuntimeCallingConvention calling_convention;
1473    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
1474    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
1475    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
1476  } else {
1477    LocationSummary* locations =
1478        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1479    locations->SetInAt(0, Location::RequiresRegister());
1480    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1481    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1482      locations->SetInAt(2, Location::RequiresFpuRegister());
1483    } else {
1484      locations->SetInAt(2, Location::RequiresRegister());
1485    }
1486  }
1487}
1488
1489void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
1490  Primitive::Type value_type = instruction->GetComponentType();
1491  LocationSummary* locations = instruction->GetLocations();
1492  bool needs_runtime_call = locations->WillCall();
1493
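        // Stores that need a type check go through the pAputObject entrypoint;
        // everything else is generated inline, mirroring the addressing logic
        // of VisitArrayGet.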
1494  if (needs_runtime_call) {
1495    codegen_->InvokeRuntime(
1496        QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
1497    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
1498  } else {
1499    Register obj = InputRegisterAt(instruction, 0);
1500    CPURegister value = InputCPURegisterAt(instruction, 2);
1501    Location index = locations->InAt(1);
1502    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
1503    MemOperand destination = HeapOperand(obj);
1504    MacroAssembler* masm = GetVIXLAssembler();
1505    BlockPoolsScope block_pools(masm);
1506    {
1507      // We use a block to end the scratch scope before the write barrier, thus
1508      // freeing the temporary registers so they can be used in `MarkGCCard`.
1509      UseScratchRegisterScope temps(masm);
1510
1511      if (index.IsConstant()) {
1512        offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1513        destination = HeapOperand(obj, offset);
1514      } else {
1515        Register temp = temps.AcquireSameSizeAs(obj);
1516        Register index_reg = InputRegisterAt(instruction, 1);
1517        __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
1518        destination = HeapOperand(temp, offset);
1519      }
1520
1521      codegen_->Store(value_type, value, destination);
1522      codegen_->MaybeRecordImplicitNullCheck(instruction);
1523    }
1524    if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
1525      codegen_->MarkGCCard(obj, value.W(), instruction->GetValueCanBeNull());
1526    }
1527  }
1528}
1529
1530void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1531  LocationSummary* locations =
1532      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1533  locations->SetInAt(0, Location::RequiresRegister());
1534  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1535  if (instruction->HasUses()) {
1536    locations->SetOut(Location::SameAsFirstInput());
1537  }
1538}
1539
1540void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1541  LocationSummary* locations = instruction->GetLocations();
1542  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
1543      instruction, locations->InAt(0), locations->InAt(1));
1544  codegen_->AddSlowPath(slow_path);
1545
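        // A single unsigned comparison (hs) catches both negative indices
        // (which wrap to large unsigned values) and indices >= length.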
1546  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
1547  __ B(slow_path->GetEntryLabel(), hs);
1548}
1549
1550void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
1551  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1552      instruction, LocationSummary::kCallOnSlowPath);
1553  locations->SetInAt(0, Location::RequiresRegister());
1554  locations->SetInAt(1, Location::RequiresRegister());
1555  locations->AddTemp(Location::RequiresRegister());
1556}
1557
1558void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
1559  LocationSummary* locations = instruction->GetLocations();
1560  Register obj = InputRegisterAt(instruction, 0);
1561  Register cls = InputRegisterAt(instruction, 1);
1562  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
1563
1564  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
1565      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
1566  codegen_->AddSlowPath(slow_path);
1567
1568  // Avoid null check if we know obj is not null.
1569  if (instruction->MustDoNullCheck()) {
1570    __ Cbz(obj, slow_path->GetExitLabel());
1571  }
1572  // Compare the class of `obj` with `cls`.
1573  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
1574  __ Cmp(obj_cls, cls);
1575  __ B(ne, slow_path->GetEntryLabel());
1576  __ Bind(slow_path->GetExitLabel());
1577}
1578
1579void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
1580  LocationSummary* locations =
1581      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1582  locations->SetInAt(0, Location::RequiresRegister());
1583  if (check->HasUses()) {
1584    locations->SetOut(Location::SameAsFirstInput());
1585  }
1586}
1587
1588void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
1589  // We assume the class is not null.
1590  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
1591      check->GetLoadClass(), check, check->GetDexPc(), true);
1592  codegen_->AddSlowPath(slow_path);
1593  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
1594}
1595
1596void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
1597  LocationSummary* locations =
1598      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
1599  Primitive::Type in_type = compare->InputAt(0)->GetType();
1600  switch (in_type) {
1601    case Primitive::kPrimLong: {
1602      locations->SetInAt(0, Location::RequiresRegister());
1603      locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
1604      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1605      break;
1606    }
1607    case Primitive::kPrimFloat:
1608    case Primitive::kPrimDouble: {
1609      locations->SetInAt(0, Location::RequiresFpuRegister());
1610      HInstruction* right = compare->InputAt(1);
1611      if ((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
1612          (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0))) {
1613        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1614      } else {
1615        locations->SetInAt(1, Location::RequiresFpuRegister());
1616      }
1617      locations->SetOut(Location::RequiresRegister());
1618      break;
1619    }
1620    default:
1621      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1622  }
1623}
1624
1625void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
1626  Primitive::Type in_type = compare->InputAt(0)->GetType();
1627
1628  //  0 if: left == right
1629  //  1 if: left  > right
1630  // -1 if: left  < right
1631  switch (in_type) {
1632    case Primitive::kPrimLong: {
1633      Register result = OutputRegister(compare);
1634      Register left = InputRegisterAt(compare, 0);
1635      Operand right = InputOperandAt(compare, 1);
1636
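            // Branch-free {-1, 0, 1} computation: Cset leaves 1 when the
            // operands differ (0 when equal), and Cneg negates it when
            // left < right.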
1637      __ Cmp(left, right);
1638      __ Cset(result, ne);
1639      __ Cneg(result, result, lt);
1640      break;
1641    }
1642    case Primitive::kPrimFloat:
1643    case Primitive::kPrimDouble: {
1644      Register result = OutputRegister(compare);
1645      FPRegister left = InputFPRegisterAt(compare, 0);
1646      if (compare->GetLocations()->InAt(1).IsConstant()) {
1647        if (kIsDebugBuild) {
1648          HInstruction* right = compare->GetLocations()->InAt(1).GetConstant();
1649          DCHECK((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
1650                  (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0)));
1651        }
1652        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
1653        __ Fcmp(left, 0.0);
1654      } else {
1655        __ Fcmp(left, InputFPRegisterAt(compare, 1));
1656      }
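            // An unordered result (NaN operand) must compare as "greater"
            // under gt bias and as "less" under lt bias. Cset/Csetm produce
            // 1/-1 for any non-equal result, including unordered; the
            // conditional negation only fires for the ordered case opposite
            // to the bias.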
1657      if (compare->IsGtBias()) {
1658        __ Cset(result, ne);
1659      } else {
1660        __ Csetm(result, ne);
1661      }
1662      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
1663      break;
1664    }
1665    default:
1666      LOG(FATAL) << "Unimplemented compare type " << in_type;
1667  }
1668}
1669
1670void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
1671  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1672  locations->SetInAt(0, Location::RequiresRegister());
1673  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1674  if (instruction->NeedsMaterialization()) {
1675    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1676  }
1677}
1678
1679void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
1680  if (!instruction->NeedsMaterialization()) {
1681    return;
1682  }
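        // A non-materialized condition is consumed directly by its user (see
        // GenerateTestAndBranch), so nothing is emitted here.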
1683
1684  LocationSummary* locations = instruction->GetLocations();
1685  Register lhs = InputRegisterAt(instruction, 0);
1686  Operand rhs = InputOperandAt(instruction, 1);
1687  Register res = RegisterFrom(locations->Out(), instruction->GetType());
1688  Condition cond = ARM64Condition(instruction->GetCondition());
1689
1690  __ Cmp(lhs, rhs);
1691  __ Cset(res, cond);
1692}
1693
1694#define FOR_EACH_CONDITION_INSTRUCTION(M)                                                \
1695  M(Equal)                                                                               \
1696  M(NotEqual)                                                                            \
1697  M(LessThan)                                                                            \
1698  M(LessThanOrEqual)                                                                     \
1699  M(GreaterThan)                                                                         \
1700  M(GreaterThanOrEqual)
1701#define DEFINE_CONDITION_VISITORS(Name)                                                  \
1702void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
1703void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
1704FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
1705#undef DEFINE_CONDITION_VISITORS
1706#undef FOR_EACH_CONDITION_INSTRUCTION
1707
1708void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
1709  DCHECK(instruction->IsDiv() || instruction->IsRem());
1710
1711  LocationSummary* locations = instruction->GetLocations();
1712  Location second = locations->InAt(1);
1713  DCHECK(second.IsConstant());
1714
1715  Register out = OutputRegister(instruction);
1716  Register dividend = InputRegisterAt(instruction, 0);
1717  int64_t imm = Int64FromConstant(second.GetConstant());
1718  DCHECK(imm == 1 || imm == -1);
1719
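        // x % 1 and x % -1 are both 0; x / 1 is x and x / -1 is -x, so no
        // division instruction is needed.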
1720  if (instruction->IsRem()) {
1721    __ Mov(out, 0);
1722  } else {
1723    if (imm == 1) {
1724      __ Mov(out, dividend);
1725    } else {
1726      __ Neg(out, dividend);
1727    }
1728  }
1729}
1730
1731void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
1732  DCHECK(instruction->IsDiv() || instruction->IsRem());
1733
1734  LocationSummary* locations = instruction->GetLocations();
1735  Location second = locations->InAt(1);
1736  DCHECK(second.IsConstant());
1737
1738  Register out = OutputRegister(instruction);
1739  Register dividend = InputRegisterAt(instruction, 0);
1740  int64_t imm = Int64FromConstant(second.GetConstant());
1741  uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
1742  DCHECK(IsPowerOfTwo(abs_imm));
1743  int ctz_imm = CTZ(abs_imm);
1744
1745  UseScratchRegisterScope temps(GetVIXLAssembler());
1746  Register temp = temps.AcquireSameSizeAs(out);
1747
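        // Branch-free rounding toward zero: bias negative dividends by
        // |imm| - 1 before the arithmetic shift, and negate the result for a
        // negative divisor. As a rough sketch (hypothetical registers),
        // `int x / 8` becomes:
        //   add  w16, w0, #7       // bias, selected only when x < 0
        //   cmp  w0, #0
        //   csel w0, w16, w0, lt
        //   asr  w0, w0, #3
        // The rem path instead extracts the same bias from the sign bits,
        // adds it, masks with |imm| - 1, and subtracts it back out.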
1748  if (instruction->IsDiv()) {
1749    __ Add(temp, dividend, abs_imm - 1);
1750    __ Cmp(dividend, 0);
1751    __ Csel(out, temp, dividend, lt);
1752    if (imm > 0) {
1753      __ Asr(out, out, ctz_imm);
1754    } else {
1755      __ Neg(out, Operand(out, ASR, ctz_imm));
1756    }
1757  } else {
1758    int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64;
1759    __ Asr(temp, dividend, bits - 1);
1760    __ Lsr(temp, temp, bits - ctz_imm);
1761    __ Add(out, dividend, temp);
1762    __ And(out, out, abs_imm - 1);
1763    __ Sub(out, out, temp);
1764  }
1765}
1766
1767void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
1768  DCHECK(instruction->IsDiv() || instruction->IsRem());
1769
1770  LocationSummary* locations = instruction->GetLocations();
1771  Location second = locations->InAt(1);
1772  DCHECK(second.IsConstant());
1773
1774  Register out = OutputRegister(instruction);
1775  Register dividend = InputRegisterAt(instruction, 0);
1776  int64_t imm = Int64FromConstant(second.GetConstant());
1777
1778  Primitive::Type type = instruction->GetResultType();
1779  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1780
1781  int64_t magic;
1782  int shift;
1783  CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift);
1784
1785  UseScratchRegisterScope temps(GetVIXLAssembler());
1786  Register temp = temps.AcquireSameSizeAs(out);
1787
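        // Standard "magic number" division (see Hacker's Delight): take the
        // high half of dividend * magic, correct when `imm` and `magic` have
        // opposite signs, shift, then add the sign bit so the quotient rounds
        // toward zero. For rem, the quotient is multiplied back and
        // subtracted from the dividend (the Msub below).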
1788  // temp = get_high(dividend * magic)
1789  __ Mov(temp, magic);
1790  if (type == Primitive::kPrimLong) {
1791    __ Smulh(temp, dividend, temp);
1792  } else {
1793    __ Smull(temp.X(), dividend, temp);
1794    __ Lsr(temp.X(), temp.X(), 32);
1795  }
1796
1797  if (imm > 0 && magic < 0) {
1798    __ Add(temp, temp, dividend);
1799  } else if (imm < 0 && magic > 0) {
1800    __ Sub(temp, temp, dividend);
1801  }
1802
1803  if (shift != 0) {
1804    __ Asr(temp, temp, shift);
1805  }
1806
1807  if (instruction->IsDiv()) {
1808    __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
1809  } else {
1810    __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
1811    // TODO: Strength reduction for msub.
1812    Register temp_imm = temps.AcquireSameSizeAs(out);
1813    __ Mov(temp_imm, imm);
1814    __ Msub(out, temp, temp_imm, dividend);
1815  }
1816}
1817
1818void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
1819  DCHECK(instruction->IsDiv() || instruction->IsRem());
1820  Primitive::Type type = instruction->GetResultType();
1821  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1822
1823  LocationSummary* locations = instruction->GetLocations();
1824  Register out = OutputRegister(instruction);
1825  Location second = locations->InAt(1);
1826
1827  if (second.IsConstant()) {
1828    int64_t imm = Int64FromConstant(second.GetConstant());
1829
1830    if (imm == 0) {
1831      // Do not generate anything. DivZeroCheck would prevent any code to be executed.
1832    } else if (imm == 1 || imm == -1) {
1833      DivRemOneOrMinusOne(instruction);
1834    } else if (IsPowerOfTwo(std::abs(imm))) {
1835      DivRemByPowerOfTwo(instruction);
1836    } else {
1837      DCHECK(imm <= -2 || imm >= 2);
1838      GenerateDivRemWithAnyConstant(instruction);
1839    }
1840  } else {
1841    Register dividend = InputRegisterAt(instruction, 0);
1842    Register divisor = InputRegisterAt(instruction, 1);
1843    if (instruction->IsDiv()) {
1844      __ Sdiv(out, dividend, divisor);
1845    } else {
1846      UseScratchRegisterScope temps(GetVIXLAssembler());
1847      Register temp = temps.AcquireSameSizeAs(out);
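            // ARM64 has no remainder instruction, so compute
            // out = dividend - (dividend / divisor) * divisor via Sdiv + Msub.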
1848      __ Sdiv(temp, dividend, divisor);
1849      __ Msub(out, temp, divisor, dividend);
1850    }
1851  }
1852}
1853
1854void LocationsBuilderARM64::VisitDiv(HDiv* div) {
1855  LocationSummary* locations =
1856      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1857  switch (div->GetResultType()) {
1858    case Primitive::kPrimInt:
1859    case Primitive::kPrimLong:
1860      locations->SetInAt(0, Location::RequiresRegister());
1861      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
1862      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1863      break;
1864
1865    case Primitive::kPrimFloat:
1866    case Primitive::kPrimDouble:
1867      locations->SetInAt(0, Location::RequiresFpuRegister());
1868      locations->SetInAt(1, Location::RequiresFpuRegister());
1869      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1870      break;
1871
1872    default:
1873      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1874  }
1875}
1876
1877void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
1878  Primitive::Type type = div->GetResultType();
1879  switch (type) {
1880    case Primitive::kPrimInt:
1881    case Primitive::kPrimLong:
1882      GenerateDivRemIntegral(div);
1883      break;
1884
1885    case Primitive::kPrimFloat:
1886    case Primitive::kPrimDouble:
1887      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
1888      break;
1889
1890    default:
1891      LOG(FATAL) << "Unexpected div type " << type;
1892  }
1893}
1894
1895void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1896  LocationSummary* locations =
1897      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1898  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1899  if (instruction->HasUses()) {
1900    locations->SetOut(Location::SameAsFirstInput());
1901  }
1902}
1903
1904void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1905  SlowPathCodeARM64* slow_path =
1906      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
1907  codegen_->AddSlowPath(slow_path);
1908  Location value = instruction->GetLocations()->InAt(0);
1909
1910  Primitive::Type type = instruction->GetType();
1911
1912  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
1913    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1914    return;
1915  }
1916
1917  if (value.IsConstant()) {
1918    int64_t divisor = Int64ConstantFrom(value);
1919    if (divisor == 0) {
1920      __ B(slow_path->GetEntryLabel());
1921    } else {
1922      // A division by a non-zero constant is valid. We don't need to perform
1923      // any check, so simply fall through.
1924    }
1925  } else {
1926    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
1927  }
1928}
1929
1930void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
1931  LocationSummary* locations =
1932      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1933  locations->SetOut(Location::ConstantLocation(constant));
1934}
1935
1936void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
1937  UNUSED(constant);
1938  // Will be generated at use site.
1939}
1940
1941void LocationsBuilderARM64::VisitExit(HExit* exit) {
1942  exit->SetLocations(nullptr);
1943}
1944
1945void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
1946  UNUSED(exit);
1947}
1948
1949void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
1950  LocationSummary* locations =
1951      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1952  locations->SetOut(Location::ConstantLocation(constant));
1953}
1954
1955void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
1956  UNUSED(constant);
1957  // Will be generated at use site.
1958}
1959
1960void LocationsBuilderARM64::VisitGoto(HGoto* got) {
1961  got->SetLocations(nullptr);
1962}
1963
1964void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
1965  HBasicBlock* successor = got->GetSuccessor();
1966  DCHECK(!successor->IsExitBlock());
1967  HBasicBlock* block = got->GetBlock();
1968  HInstruction* previous = got->GetPrevious();
1969  HLoopInformation* info = block->GetLoopInformation();
1970
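        // On a loop back edge, emit the suspend check before branching so the
        // thread can yield (e.g. for GC) once per iteration.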
1971  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1972    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1973    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1974    return;
1975  }
1976  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1977    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1978  }
1979  if (!codegen_->GoesToNextBlock(block, successor)) {
1980    __ B(codegen_->GetLabelOf(successor));
1981  }
1982}
1983
1984void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
1985                                                          vixl::Label* true_target,
1986                                                          vixl::Label* false_target,
1987                                                          vixl::Label* always_true_target) {
1988  HInstruction* cond = instruction->InputAt(0);
1989  HCondition* condition = cond->AsCondition();
1990
1991  if (cond->IsIntConstant()) {
1992    int32_t cond_value = cond->AsIntConstant()->GetValue();
1993    if (cond_value == 1) {
1994      if (always_true_target != nullptr) {
1995        __ B(always_true_target);
1996      }
1997      return;
1998    } else {
1999      DCHECK_EQ(cond_value, 0);
2000    }
2001  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
2002    // The condition instruction has been materialized, compare the output to 0.
2003    Location cond_val = instruction->GetLocations()->InAt(0);
2004    DCHECK(cond_val.IsRegister());
2005    __ Cbnz(InputRegisterAt(instruction, 0), true_target);
2006  } else {
2007    // The condition instruction has not been materialized, use its inputs as
2008    // the comparison and its condition as the branch condition.
2009    Register lhs = InputRegisterAt(condition, 0);
2010    Operand rhs = InputOperandAt(condition, 1);
2011    Condition arm64_cond = ARM64Condition(condition->GetCondition());
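          // Comparisons against zero can use a single compare-and-branch
          // (cbz/cbnz) or sign-bit test-and-branch (tbz/tbnz) instruction.
          // gt and le also depend on the zero flag, so they still need a
          // full cmp.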
2012    if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
2013      switch (arm64_cond) {
2014        case eq:
2015          __ Cbz(lhs, true_target);
2016          break;
2017        case ne:
2018          __ Cbnz(lhs, true_target);
2019          break;
2020        case lt:
2021          // Test the sign bit and branch accordingly.
2022          __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
2023          break;
2024        case ge:
2025          // Test the sign bit and branch accordingly.
2026          __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
2027          break;
2028        default:
2029          // Without the `static_cast` the compiler emits an error under
2030          // `-Werror=sign-promo`.
2031          LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond);
2032      }
2033    } else {
2034      __ Cmp(lhs, rhs);
2035      __ B(arm64_cond, true_target);
2036    }
2037  }
2038  if (false_target != nullptr) {
2039    __ B(false_target);
2040  }
2041}
2042
2043void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
2044  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2045  HInstruction* cond = if_instr->InputAt(0);
2046  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2047    locations->SetInAt(0, Location::RequiresRegister());
2048  }
2049}
2050
2051void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
2052  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2053  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2054  vixl::Label* always_true_target = true_target;
2055  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2056                                if_instr->IfTrueSuccessor())) {
2057    always_true_target = nullptr;
2058  }
2059  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2060                                if_instr->IfFalseSuccessor())) {
2061    false_target = nullptr;
2062  }
2063  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2064}
2065
2066void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2067  LocationSummary* locations = new (GetGraph()->GetArena())
2068      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2069  HInstruction* cond = deoptimize->InputAt(0);
2070  DCHECK(cond->IsCondition());
2071  if (cond->AsCondition()->NeedsMaterialization()) {
2072    locations->SetInAt(0, Location::RequiresRegister());
2073  }
2074}
2075
2076void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2077  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
2078      DeoptimizationSlowPathARM64(deoptimize);
2079  codegen_->AddSlowPath(slow_path);
2080  vixl::Label* slow_path_entry = slow_path->GetEntryLabel();
2081  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2082}
2083
2084void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2085  HandleFieldGet(instruction);
2086}
2087
2088void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2089  HandleFieldGet(instruction, instruction->GetFieldInfo());
2090}
2091
2092void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2093  HandleFieldSet(instruction);
2094}
2095
2096void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2097  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
2098}
2099
2100void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
2101  LocationSummary::CallKind call_kind =
2102      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
2103  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2104  locations->SetInAt(0, Location::RequiresRegister());
2105  locations->SetInAt(1, Location::RequiresRegister());
2106  // The output overlaps the inputs.
2107  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2108}
2109
2110void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
2111  LocationSummary* locations = instruction->GetLocations();
2112  Register obj = InputRegisterAt(instruction, 0);
2113  Register cls = InputRegisterAt(instruction, 1);
2114  Register out = OutputRegister(instruction);
2115
2116  vixl::Label done;
2117
2118  // Return 0 if `obj` is null.
2119  // Avoid null check if we know `obj` is not null.
2120  if (instruction->MustDoNullCheck()) {
2121    __ Mov(out, 0);
2122    __ Cbz(obj, &done);
2123  }
2124
2125  // Compare the class of `obj` with `cls`.
2126  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
2127  __ Cmp(out, cls);
2128  if (instruction->IsClassFinal()) {
2129    // Classes must be equal for the instanceof to succeed.
2130    __ Cset(out, eq);
2131  } else {
2132    // If the classes are not equal, we go into a slow path.
2133    DCHECK(locations->OnlyCallsOnSlowPath());
2134    SlowPathCodeARM64* slow_path =
2135        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
2136        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
2137    codegen_->AddSlowPath(slow_path);
2138    __ B(ne, slow_path->GetEntryLabel());
2139    __ Mov(out, 1);
2140    __ Bind(slow_path->GetExitLabel());
2141  }
2142
2143  __ Bind(&done);
2144}
2145
2146void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
2147  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2148  locations->SetOut(Location::ConstantLocation(constant));
2149}
2150
2151void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
2152  // Will be generated at use site.
2153  UNUSED(constant);
2154}
2155
2156void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
2157  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2158  locations->SetOut(Location::ConstantLocation(constant));
2159}
2160
2161void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
2162  // Will be generated at use site.
2163  UNUSED(constant);
2164}
2165
2166void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
2167  LocationSummary* locations =
2168      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
2169  locations->AddTemp(LocationFrom(x0));
2170
2171  InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
2172  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
2173    HInstruction* input = invoke->InputAt(i);
2174    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
2175  }
2176
2177  Primitive::Type return_type = invoke->GetType();
2178  if (return_type != Primitive::kPrimVoid) {
2179    locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
2180  }
2181}
2182
2183void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2184  HandleInvoke(invoke);
2185}
2186
2187void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2188  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
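        // Interface calls dispatch through the IMT embedded in the receiver's
        // class. The table has a fixed size, so the method's IMT index is
        // taken modulo kImtSize; colliding entries resolve through the
        // conflict trampoline mentioned below.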
2189  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
2190  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
2191          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
2192  Location receiver = invoke->GetLocations()->InAt(0);
2193  Offset class_offset = mirror::Object::ClassOffset();
2194  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2195
2196  // The register ip1 is required to be used for the hidden argument in
2197  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
2198  MacroAssembler* masm = GetVIXLAssembler();
2199  UseScratchRegisterScope scratch_scope(masm);
2200  BlockPoolsScope block_pools(masm);
2201  scratch_scope.Exclude(ip1);
2202  __ Mov(ip1, invoke->GetDexMethodIndex());
2203
2204  // temp = object->GetClass();
2205  if (receiver.IsStackSlot()) {
2206    __ Ldr(temp, StackOperandFrom(receiver));
2207    __ Ldr(temp, HeapOperand(temp, class_offset));
2208  } else {
2209    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
2210  }
2211  codegen_->MaybeRecordImplicitNullCheck(invoke);
2212  // temp = temp->GetImtEntryAt(method_offset);
2213  __ Ldr(temp, HeapOperand(temp, method_offset));
2214  // lr = temp->GetEntryPoint();
2215  __ Ldr(lr, HeapOperand(temp, entry_point));
2216  // lr();
2217  __ Blr(lr);
2218  DCHECK(!codegen_->IsLeafMethod());
2219  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2220}
2221
2222void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2223  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2224  if (intrinsic.TryDispatch(invoke)) {
2225    return;
2226  }
2227
2228  HandleInvoke(invoke);
2229}
2230
2231void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2232  // When we do not run baseline, explicit clinit checks triggered by static
2233  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2234  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2235
2236  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2237  if (intrinsic.TryDispatch(invoke)) {
2238    return;
2239  }
2240
2241  HandleInvoke(invoke);
2242}
2243
2244static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
2245  if (invoke->GetLocations()->Intrinsified()) {
2246    IntrinsicCodeGeneratorARM64 intrinsic(codegen);
2247    intrinsic.Dispatch(invoke);
2248    return true;
2249  }
2250  return false;
2251}
2252
2253void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
2254  // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
2255  DCHECK(temp.Is(kArtMethodRegister));
2256  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
2257      invoke->GetDexMethodIndex() * kHeapRefSize;
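        // The callee is looked up in the caller's dex cache, an array of heap
        // references indexed by dex method index.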
2258
2259  // TODO: Implement all kinds of calls:
2260  // 1) boot -> boot
2261  // 2) app -> boot
2262  // 3) app -> app
2263  //
2264  // Currently we implement the app -> app logic, which looks up in the resolve cache.
2265
2266  if (invoke->IsStringInit()) {
2267    // temp = thread->string_init_entrypoint
2268    __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
2269    // LR = temp->entry_point_from_quick_compiled_code_;
2270    __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2271        kArm64WordSize)));
2272    // lr()
2273    __ Blr(lr);
2274  } else {
2275    // temp = method;
2276    LoadCurrentMethod(temp);
2277    if (!invoke->IsRecursive()) {
2278      // temp = temp->dex_cache_resolved_methods_;
2279      __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
2280      // temp = temp[index_in_cache];
2281      __ Ldr(temp, HeapOperand(temp, index_in_cache));
2282      // lr = temp->entry_point_from_quick_compiled_code_;
2283      __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2284          kArm64WordSize)));
2285      // lr();
2286      __ Blr(lr);
2287    } else {
2288      __ Bl(&frame_entry_label_);
2289    }
2290  }
2291
2292  DCHECK(!IsLeafMethod());
2293}
2294
2295void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2296  // When we do not run baseline, explicit clinit checks triggered by static
2297  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2298  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2299
2300  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2301    return;
2302  }
2303
2304  BlockPoolsScope block_pools(GetVIXLAssembler());
2305  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
2306  codegen_->GenerateStaticOrDirectCall(invoke, temp);
2307  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2308}
2309
2310void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2311  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2312    return;
2313  }
2314
2315  LocationSummary* locations = invoke->GetLocations();
2316  Location receiver = locations->InAt(0);
2317  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
2318  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
2319    invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
2320  Offset class_offset = mirror::Object::ClassOffset();
2321  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2322
2323  BlockPoolsScope block_pools(GetVIXLAssembler());
2324
2325  // temp = object->GetClass();
2326  if (receiver.IsStackSlot()) {
2327    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
2328    __ Ldr(temp, HeapOperand(temp, class_offset));
2329  } else {
2330    DCHECK(receiver.IsRegister());
2331    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
2332  }
2333  codegen_->MaybeRecordImplicitNullCheck(invoke);
2334  // temp = temp->GetMethodAt(method_offset);
2335  __ Ldr(temp, HeapOperand(temp, method_offset));
2336  // lr = temp->GetEntryPoint();
2337  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
2338  // lr();
2339  __ Blr(lr);
2340  DCHECK(!codegen_->IsLeafMethod());
2341  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2342}
2343
2344void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
2345  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2346                                                              : LocationSummary::kNoCall;
2347  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2348  locations->SetInAt(0, Location::RequiresRegister());
2349  locations->SetOut(Location::RequiresRegister());
2350}
2351
2352void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
2353  Register out = OutputRegister(cls);
2354  Register current_method = InputRegisterAt(cls, 0);
2355  if (cls->IsReferrersClass()) {
2356    DCHECK(!cls->CanCallRuntime());
2357    DCHECK(!cls->MustGenerateClinitCheck());
2358    __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset()));
2359  } else {
2360    DCHECK(cls->CanCallRuntime());
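          // Load the type from the method's dex cache; a null entry means the
          // class is not resolved yet and the slow path must load it (and run
          // <clinit> if required).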
2361    __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
2362    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
2363
2364    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
2365        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
2366    codegen_->AddSlowPath(slow_path);
2367    __ Cbz(out, slow_path->GetEntryLabel());
2368    if (cls->MustGenerateClinitCheck()) {
2369      GenerateClassInitializationCheck(slow_path, out);
2370    } else {
2371      __ Bind(slow_path->GetExitLabel());
2372    }
2373  }
2374}
2375
2376void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
2377  LocationSummary* locations =
2378      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2379  locations->SetOut(Location::RequiresRegister());
2380}
2381
2382void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
2383  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
2384  __ Ldr(OutputRegister(instruction), exception);
2385  __ Str(wzr, exception);
2386}
2387
2388void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
2389  load->SetLocations(nullptr);
2390}
2391
2392void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
2393  // Nothing to do, this is driven by the code generator.
2394  UNUSED(load);
2395}
2396
2397void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
2398  LocationSummary* locations =
2399      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2400  locations->SetOut(Location::RequiresRegister());
2401}
2402
2403void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
2404  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
2405  codegen_->AddSlowPath(slow_path);
2406
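        // Chase current method -> declaring class -> dex cache strings ->
        // entry; a null entry means the string must be resolved on the slow
        // path.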
2407  Register out = OutputRegister(load);
2408  codegen_->LoadCurrentMethod(out);
2409  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
2410  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
2411  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
2412  __ Cbz(out, slow_path->GetEntryLabel());
2413  __ Bind(slow_path->GetExitLabel());
2414}
2415
2416void LocationsBuilderARM64::VisitLocal(HLocal* local) {
2417  local->SetLocations(nullptr);
2418}
2419
2420void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
2421  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2422}
2423
2424void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
2425  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2426  locations->SetOut(Location::ConstantLocation(constant));
2427}
2428
2429void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
2430  // Will be generated at use site.
2431  UNUSED(constant);
2432}
2433
2434void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2435  LocationSummary* locations =
2436      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2437  InvokeRuntimeCallingConvention calling_convention;
2438  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2439}
2440
2441void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2442  codegen_->InvokeRuntime(instruction->IsEnter()
2443        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
2444      instruction,
2445      instruction->GetDexPc(),
2446      nullptr);
2447  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2448}
2449
2450void LocationsBuilderARM64::VisitMul(HMul* mul) {
2451  LocationSummary* locations =
2452      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2453  switch (mul->GetResultType()) {
2454    case Primitive::kPrimInt:
2455    case Primitive::kPrimLong:
2456      locations->SetInAt(0, Location::RequiresRegister());
2457      locations->SetInAt(1, Location::RequiresRegister());
2458      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2459      break;
2460
2461    case Primitive::kPrimFloat:
2462    case Primitive::kPrimDouble:
2463      locations->SetInAt(0, Location::RequiresFpuRegister());
2464      locations->SetInAt(1, Location::RequiresFpuRegister());
2465      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2466      break;
2467
2468    default:
2469      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2470  }
2471}
2472
2473void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
2474  switch (mul->GetResultType()) {
2475    case Primitive::kPrimInt:
2476    case Primitive::kPrimLong:
2477      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
2478      break;
2479
2480    case Primitive::kPrimFloat:
2481    case Primitive::kPrimDouble:
2482      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
2483      break;
2484
2485    default:
2486      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2487  }
2488}
2489
2490void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
2491  LocationSummary* locations =
2492      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2493  switch (neg->GetResultType()) {
2494    case Primitive::kPrimInt:
2495    case Primitive::kPrimLong:
2496      locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
2497      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2498      break;
2499
2500    case Primitive::kPrimFloat:
2501    case Primitive::kPrimDouble:
2502      locations->SetInAt(0, Location::RequiresFpuRegister());
2503      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2504      break;
2505
2506    default:
2507      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2508  }
2509}
2510
2511void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
2512  switch (neg->GetResultType()) {
2513    case Primitive::kPrimInt:
2514    case Primitive::kPrimLong:
2515      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
2516      break;
2517
2518    case Primitive::kPrimFloat:
2519    case Primitive::kPrimDouble:
2520      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
2521      break;
2522
2523    default:
2524      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2525  }
2526}
2527
2528void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
2529  LocationSummary* locations =
2530      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2531  InvokeRuntimeCallingConvention calling_convention;
2532  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2533  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
2534  locations->SetOut(LocationFrom(x0));
2535  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2536  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2537                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
2538}
2539
2540void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
2541  LocationSummary* locations = instruction->GetLocations();
2542  InvokeRuntimeCallingConvention calling_convention;
2543  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2544  DCHECK(type_index.Is(w0));
2545  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
2546  DCHECK(current_method.Is(w2));
2547  codegen_->LoadCurrentMethod(current_method);
2548  __ Mov(type_index, instruction->GetTypeIndex());
2549  codegen_->InvokeRuntime(
2550      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2551      instruction,
2552      instruction->GetDexPc(),
2553      nullptr);
2554  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2555                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
2556}
2557
2558void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
2559  LocationSummary* locations =
2560      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2561  InvokeRuntimeCallingConvention calling_convention;
2562  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2563  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
2564  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2565  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
2566}
2567
2568void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
2569  LocationSummary* locations = instruction->GetLocations();
2570  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2571  DCHECK(type_index.Is(w0));
2572  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
2573  DCHECK(current_method.Is(w1));
2574  codegen_->LoadCurrentMethod(current_method);
2575  __ Mov(type_index, instruction->GetTypeIndex());
2576  codegen_->InvokeRuntime(
2577      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2578      instruction,
2579      instruction->GetDexPc(),
2580      nullptr);
2581  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
2582}
2583
2584void LocationsBuilderARM64::VisitNot(HNot* instruction) {
2585  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2586  locations->SetInAt(0, Location::RequiresRegister());
2587  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2588}
2589
2590void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
2591  switch (instruction->GetResultType()) {
2592    case Primitive::kPrimInt:
2593    case Primitive::kPrimLong:
2594      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
2595      break;
2596
2597    default:
2598      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2599  }
2600}
2601
2602void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
2603  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2604  locations->SetInAt(0, Location::RequiresRegister());
2605  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2606}
2607
2608void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
2609  __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
2610}
2611
2612void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
2613  LocationSummary* locations =
2614      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2615  locations->SetInAt(0, Location::RequiresRegister());
2616  if (instruction->HasUses()) {
2617    locations->SetOut(Location::SameAsFirstInput());
2618  }
2619}
2620
2621void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2622  if (codegen_->CanMoveNullCheckToUser(instruction)) {
2623    return;
2624  }
2625
2626  BlockPoolsScope block_pools(GetVIXLAssembler());
2627  Location obj = instruction->GetLocations()->InAt(0);
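        // A load through `obj` into the zero register faults when `obj` is
        // null; the fault handler then raises the NullPointerException using
        // the PC info recorded below.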
2628  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
2629  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2630}
2631
2632void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2633  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
2634  codegen_->AddSlowPath(slow_path);
2635
2636  LocationSummary* locations = instruction->GetLocations();
2637  Location obj = locations->InAt(0);
2638
2639  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
2640}
2641
2642void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
2643  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2644    GenerateImplicitNullCheck(instruction);
2645  } else {
2646    GenerateExplicitNullCheck(instruction);
2647  }
2648}
2649
2650void LocationsBuilderARM64::VisitOr(HOr* instruction) {
2651  HandleBinaryOp(instruction);
2652}
2653
2654void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
2655  HandleBinaryOp(instruction);
2656}
2657
2658void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2659  LOG(FATAL) << "Unreachable";
2660}
2661
2662void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
2663  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2664}
2665
2666void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
2667  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2668  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2669  if (location.IsStackSlot()) {
2670    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2671  } else if (location.IsDoubleStackSlot()) {
2672    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2673  }
2674  locations->SetOut(location);
2675}
2676
2677void InstructionCodeGeneratorARM64::VisitParameterValue(
2678    HParameterValue* instruction ATTRIBUTE_UNUSED) {
2679  // Nothing to do, the parameter is already at its location.
2680}
2681
2682void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
2683  LocationSummary* locations =
2684      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(LocationFrom(x0));
}

void InstructionCodeGeneratorARM64::VisitCurrentMethod(
    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
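  // Phis never reach code generation: the register allocator resolves them
  // into parallel moves on the predecessor edges.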
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GenerateDivRemIntegral(rem);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
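      // ARM64 has no floating-point remainder instruction; call the fmodf/fmod
      // runtime entry points, which implement the Java semantics of %.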
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type return_type = instruction->InputAt(0)->GetType();
  locations->SetInAt(0, ARM64ReturnLocation(return_type));
}

void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
  instruction->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
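  // Nothing to emit: the value input was allocated directly in the local's
  // stack slot (see the locations builder above), so it is already stored
  // there once the inputs are in place.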
  UNUSED(store);
}

void LocationsBuilderARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}

void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);
  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  if (Primitive::IsFloatingPointType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (Primitive::IsFloatingPointType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    int result_size = Primitive::ComponentSize(result_type);
    int input_size = Primitive::ComponentSize(input_type);
    int min_size = std::min(result_size, input_size);
    Register output = OutputRegister(conversion);
    Register source = InputRegisterAt(conversion, 0);
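    // Integral conversions reduce to bitfield extracts: char, the only
    // unsigned primitive type, zero-extends (Ubfx), while all other types
    // sign-extend (Sbfx). For example, byte->char takes the low 16 bits
    // unsigned, so (char) (byte) -1 yields 0xFFFF.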
    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
    } else if ((result_type == Primitive::kPrimChar) ||
               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    } else {
      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
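    // Scvtf: signed integer to floating-point convert.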
    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
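    // Fcvtzs rounds toward zero and saturates on overflow, with NaN mapping
    // to 0, which matches Java's float-to-int and double-to-long semantics.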
    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace arm64
}  // namespace art