code_generator_arm64.cc revision 5cfe61f27ed9203498169355bb95db756486d292
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm64.h"

#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"


using namespace vixl;   // NOLINT(build/namespaces)

#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
#endif

namespace art {

namespace arm64 {

using helpers::CPURegisterFrom;
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
using helpers::InputCPURegisterAt;
using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::RegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
using helpers::ArtVixlRegCodeCoherentForRegSet;

static constexpr int kCurrentMethodStackOffset = 0;

inline Condition ARM64Condition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return eq;
    case kCondNE: return ne;
    case kCondLT: return lt;
    case kCondLE: return le;
    case kCondGT: return gt;
    case kCondGE: return ge;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

Location ARM64ReturnLocation(Primitive::Type return_type) {
  // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
  // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
  // but we use the exact registers for clarity.
  if (return_type == Primitive::kPrimFloat) {
    return LocationFrom(s0);
  } else if (return_type == Primitive::kPrimDouble) {
    return LocationFrom(d0);
  } else if (return_type == Primitive::kPrimLong) {
    return LocationFrom(x0);
  } else if (return_type == Primitive::kPrimVoid) {
    return Location::NoLocation();
  } else {
    return LocationFrom(w0);
  }
}
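
// Illustrative summary of the mapping above (not part of the original source):
// an `int` or reference return value lands in w0, a `long` in x0, a `float`
// in s0 and a `double` in d0, matching the AAPCS64 result registers.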

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
  return ARM64ReturnLocation(return_type);
}

#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()

// Calculate the memory-access operand used to save/restore the live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
                                           RegisterSet* register_set,
                                           int64_t spill_offset,
                                           bool is_save) {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
                                         codegen->GetNumberOfCoreRegisters(),
                                         register_set->GetFloatingPointRegisters(),
                                         codegen->GetNumberOfFloatingPointRegisters()));

  CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
      register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
  CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
      register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));

  MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
  UseScratchRegisterScope temps(masm);

  Register base = masm->StackPointer();
  int64_t core_spill_size = core_list.TotalSizeInBytes();
  int64_t fp_spill_size = fp_list.TotalSizeInBytes();
  int64_t reg_size = kXRegSizeInBytes;
  int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
  uint32_t ls_access_size = WhichPowerOf2(reg_size);
  if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
      !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
    // If the offset does not fit in the instruction's immediate field, use an alternate register
    // to compute the base address (the spill base address of the floating point registers).
    Register new_base = temps.AcquireSameSizeAs(base);
    __ Add(new_base, base, Operand(spill_offset + core_spill_size));
    base = new_base;
    spill_offset = -core_spill_size;
    int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
    DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
    DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
  }

  if (is_save) {
    __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
    __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  } else {
    __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
    __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
  }
}
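
// Worked example of the rebase above (illustrative numbers, not from the
// original source): with two live core registers, two live FP registers and
// spill_offset = 608, max_ls_pair_offset = 608 + 16 + 16 - 16 = 624. An
// STP/LDP of X registers encodes a scaled 7-bit signed immediate, i.e.
// offsets in [-512, 504], so 624 does not fit. The helper then rebases:
// new_base = sp + 624 (the FP spill base), the core registers are accessed
// at [new_base, #-16] and the FP registers at [new_base, #0], both of which
// trivially fit the immediate field.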

void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
      // If the register holds an object, update the stack mask.
      if (locations->RegisterContainsObject(i)) {
        locations->SetStackBit(stack_offset / kVRegSize);
      }
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_core_stack_offsets_[i] = stack_offset;
      stack_offset += kXRegSizeInBytes;
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
        register_set->ContainsFloatingPointRegister(i)) {
      DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
      DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
      saved_fpu_stack_offsets_[i] = stack_offset;
      stack_offset += kDRegSizeInBytes;
    }
  }

  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
}

void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  SaveRestoreLiveRegistersHelper(codegen, register_set,
                                 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
}

class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
                           Location index_location,
                           Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};

class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
};

class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  LoadClassSlowPathARM64(HLoadClass* cls,
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};

class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};

class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
};

class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
                                     HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm64_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(arm64_codegen->GetLabelOf(successor_));
    }
  }

  vixl::Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  vixl::Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};

class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  TypeCheckSlowPathARM64(HInstruction* instruction,
                         Location class_to_check,
                         Location object_class,
                         uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm64_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
                           const mirror::Class*, const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};

class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
 public:
  explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};

#undef __

Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
  return next_location;
}
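
// Example walk-through (illustrative, derived from the logic above): for a
// signature (int, double, long), the int takes the first free GP argument
// register, the double takes the first free FP argument register, and the
// long takes the second GP argument register; stack_index_ still advances by
// 1 + 2 + 2 vreg slots, since stack space is reserved for every argument
// regardless of whether it is passed in a register.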

Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
  return LocationFrom(kArtMethodRegister);
}

CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                                       const Arm64InstructionSetFeatures& isa_features,
                                       const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfAllocatableRegisters,
                    kNumberOfAllocatableFPRegisters,
                    kNumberOfAllocatableRegisterPairs,
                    callee_saved_core_registers.list(),
                    callee_saved_fp_registers.list(),
                    compiler_options),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save the link register (containing the return address) to mimic Quick.
  AddAllocatedRegister(LocationFrom(lr));
}

#undef __
#define __ GetVIXLAssembler()->

void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
  // Ensure we emit the literal pool.
  __ FinalizeCode();
  CodeGenerator::Finalize(allocator);
}

void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
  // Note: There are 6 kinds of moves:
  // 1. constant -> GPR/FPR (non-cycle)
  // 2. constant -> stack (non-cycle)
  // 3. GPR/FPR -> GPR/FPR
  // 4. GPR/FPR -> stack
  // 5. stack -> GPR/FPR
  // 6. stack -> stack (non-cycle)
  // Cases 1, 2 and 6 should never be included in a dependency cycle on ARM64. For cases 3, 4
  // and 5 VIXL uses at most 1 GPR. VIXL has 2 GPR and 1 FPR temps, and there should be no
  // intersecting cycles on ARM64, so we always have 1 GPR and 1 FPR VIXL temp available to
  // resolve the dependency.
  vixl_temps_.Open(GetVIXLAssembler());
}

void ParallelMoveResolverARM64::FinishEmitNativeCode() {
  vixl_temps_.Close();
}

Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
  DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
         kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
  kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
  Location scratch = GetScratchLocation(kind);
  if (!scratch.Equals(Location::NoLocation())) {
    return scratch;
  }
  // Allocate from VIXL temp registers.
  if (kind == Location::kRegister) {
    scratch = LocationFrom(vixl_temps_.AcquireX());
  } else {
    DCHECK(kind == Location::kFpuRegister);
    scratch = LocationFrom(vixl_temps_.AcquireD());
  }
  AddScratchLocation(scratch);
  return scratch;
}

void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
  if (loc.IsRegister()) {
    vixl_temps_.Release(XRegisterFrom(loc));
  } else {
    DCHECK(loc.IsFpuRegister());
    vixl_temps_.Release(DRegisterFrom(loc));
  }
  RemoveScratchLocation(loc);
}

void ParallelMoveResolverARM64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
}

void CodeGeneratorARM64::GenerateFrameEntry() {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
  if (do_overflow_check) {
    UseScratchRegisterScope temps(masm);
    Register temp = temps.AcquireX();
    DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
    __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
    __ Ldr(wzr, MemOperand(temp, 0));
    RecordPcInfo(nullptr, 0);
  }

  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    // Stack layout:
    //      sp[frame_size - 8]        : lr.
    //      ...                       : other preserved core registers.
    //      ...                       : other preserved fp registers.
    //      ...                       : reserved frame space.
    //      sp[0]                     : current method.
    __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
    GetAssembler()->cfi().AdjustCFAOffset(frame_size);
    GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
  }
}
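
// Concrete example (illustrative numbers, not from the original source): with
// frame_size = 64 and lr as the only preserved register, the pre-indexed Str
// above both allocates the frame (sp -= 64) and stores the ArtMethod* at
// sp[0] in a single instruction; lr is then spilled at sp[56], i.e.
// frame_size - GetCoreSpillSize().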

void CodeGeneratorARM64::GenerateFrameExit() {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  GetAssembler()->cfi().RememberState();
  if (!HasEmptyFrame()) {
    int frame_size = GetFrameSize();
    GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
        frame_size - FrameEntrySpillSize());
    GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
        frame_size - GetCoreSpillSize());
    __ Drop(frame_size);
    GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
  }
  __ Ret();
  GetAssembler()->cfi().RestoreState();
  GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
  return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
                          core_spill_mask_);
}

vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
  DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
                                         GetNumberOfFloatingPointRegisters()));
  return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
                          fpu_spill_mask_);
}

void CodeGeneratorARM64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorARM64::Move(HInstruction* instruction,
                              Location location,
                              HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    int64_t value = GetInt64ValueOf(instruction->AsConstant());
    if (location.IsRegister()) {
      Register dst = RegisterFrom(location, type);
      DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
             (instruction->IsLongConstant() && dst.Is64Bits()));
      __ Mov(dst, value);
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
          ? temps.AcquireW()
          : temps.AcquireX();
      __ Mov(temp, value);
      __ Str(temp, StackOperandFrom(location));
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register card = temps.AcquireX();
  Register temp = temps.AcquireW();   // Index within the CardTable - 32bit.
  vixl::Label done;
  if (value_can_be_null) {
    __ Cbz(value, &done);
  }
  __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ Strb(card, MemOperand(card, temp.X()));
  if (value_can_be_null) {
    __ Bind(&done);
  }
}
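
// How the card mark works (explanatory note): `card` holds the biased card
// table base read from the Thread, and `temp` holds the object address
// shifted right by kCardShift, so the Strb writes to
// biased_base + (object >> kCardShift), i.e. the card covering `object`.
// The byte stored is the low eight bits of `card` itself; the runtime biases
// the card table base precisely so that this value equals the dirty-card
// marker, which saves materializing a separate constant here.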

void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
  // Blocked core registers:
  //      lr        : Runtime reserved.
  //      tr        : Runtime reserved.
  //      xSuspend  : Runtime reserved. TODO: Unblock this when the runtime stops using it.
  //      ip1       : VIXL core temp.
  //      ip0       : VIXL core temp.
  //
  // Blocked fp registers:
  //      d31       : VIXL fp temp.
  CPURegList reserved_core_registers = vixl_reserved_core_registers;
  reserved_core_registers.Combine(runtime_reserved_core_registers);
  while (!reserved_core_registers.IsEmpty()) {
    blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
  }

  CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
  while (!reserved_fp_registers.IsEmpty()) {
    blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
  }

  if (is_baseline) {
    CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
    while (!reserved_core_baseline_registers.IsEmpty()) {
      blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
    }

    CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
    while (!reserved_fp_baseline_registers.IsEmpty()) {
      blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
    }
  }
}

Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
    DCHECK_NE(reg, -1);
    return Location::FpuRegisterLocation(reg);
  } else {
    ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
    DCHECK_NE(reg, -1);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Str(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  FPRegister reg = FPRegister(reg_id, kDRegSize);
  __ Ldr(reg, MemOperand(sp, stack_index));
  return kArm64WordSize;
}

void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << XRegister(reg);
}

void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << DRegister(reg);
}

void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
  if (constant->IsIntConstant()) {
    __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
  } else if (constant->IsLongConstant()) {
    __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
  } else if (constant->IsNullConstant()) {
    __ Mov(Register(destination), 0);
  } else if (constant->IsFloatConstant()) {
    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
  } else {
    DCHECK(constant->IsDoubleConstant());
    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
  }
}


static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
  DCHECK(constant.IsConstant());
  HConstant* cst = constant.GetConstant();
  return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
         // Null is mapped to a core W register, which we associate with kPrimInt.
         (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
         (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
         (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
         (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}

void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves. In debug mode we also
  // check the coherency of the locations and the type.
  bool unspecified_type = (type == Primitive::kPrimVoid);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    CPURegister dst = CPURegisterFrom(destination, type);
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
      __ Ldr(dst, StackOperandFrom(source));
    } else if (source.IsConstant()) {
      DCHECK(CoherentConstantAndType(source, type));
      MoveConstant(dst, source.GetConstant());
    } else {
      if (destination.IsRegister()) {
        __ Mov(Register(dst), RegisterFrom(source, type));
      } else {
        DCHECK(destination.IsFpuRegister());
        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
    } else if (source.IsConstant()) {
      DCHECK(unspecified_type || CoherentConstantAndType(source, type));
      UseScratchRegisterScope temps(GetVIXLAssembler());
      HConstant* src_cst = source.GetConstant();
      CPURegister temp;
      if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
        temp = temps.AcquireW();
      } else if (src_cst->IsLongConstant()) {
        temp = temps.AcquireX();
      } else if (src_cst->IsFloatConstant()) {
        temp = temps.AcquireS();
      } else {
        DCHECK(src_cst->IsDoubleConstant());
        temp = temps.AcquireD();
      }
      MoveConstant(temp, src_cst);
      __ Str(temp, StackOperandFrom(destination));
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
      UseScratchRegisterScope temps(GetVIXLAssembler());
      // There is generally less pressure on FP registers.
      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
      __ Ldr(temp, StackOperandFrom(source));
      __ Str(temp, StackOperandFrom(destination));
    }
  }
}
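
// Example resolutions (illustrative): a float-constant -> stack-slot move
// acquires an S scratch register, materializes the constant with Fmov and
// stores it; a stack -> stack copy is routed through an FP scratch register
// because, as noted above, the FP temps are under less pressure than the two
// core VIXL temps (ip0/ip1).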

void CodeGeneratorARM64::Load(Primitive::Type type,
                              CPURegister dst,
                              const MemOperand& src) {
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldrb(Register(dst), src);
      break;
    case Primitive::kPrimByte:
      __ Ldrsb(Register(dst), src);
      break;
    case Primitive::kPrimShort:
      __ Ldrsh(Register(dst), src);
      break;
    case Primitive::kPrimChar:
      __ Ldrh(Register(dst), src);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldr(dst, src);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
                                     CPURegister dst,
                                     const MemOperand& src) {
  MacroAssembler* masm = GetVIXLAssembler();
  BlockPoolsScope block_pools(masm);
  UseScratchRegisterScope temps(masm);
  Register temp_base = temps.AcquireX();
  Primitive::Type type = instruction->GetType();

  DCHECK(!src.IsPreIndex());
  DCHECK(!src.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle MemOperand.
  __ Add(temp_base, src.base(), OperandFromMemOperand(src));
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimByte:
      __ Ldarb(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimChar:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimShort:
      __ Ldarh(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
      __ Ldar(Register(dst), base);
      MaybeRecordImplicitNullCheck(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(dst.IsFPRegister());
      DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Ldar(temp, base);
      MaybeRecordImplicitNullCheck(instruction);
      __ Fmov(FPRegister(dst), temp);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}
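
// Why the extra Sbfx for byte/short (explanatory note): unlike the Ldrsb and
// Ldrsh used in Load() above, the acquire forms Ldarb/Ldarh only zero-extend,
// so the signed types re-sign-extend the low 8 or 16 bits afterwards. FP
// values take a similar detour: ARMv8 has no load-acquire into an FP/SIMD
// register, so the value is loaded into a core register with Ldar and moved
// across with Fmov.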

void CodeGeneratorARM64::Store(Primitive::Type type,
                               CPURegister src,
                               const MemOperand& dst) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Strb(Register(src), dst);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Strh(Register(src), dst);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Str(src, dst);
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
                                      CPURegister src,
                                      const MemOperand& dst) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp_base = temps.AcquireX();

  DCHECK(!dst.IsPreIndex());
  DCHECK(!dst.IsPostIndex());

  // TODO(vixl): Let the MacroAssembler handle this.
  Operand op = OperandFromMemOperand(dst);
  __ Add(temp_base, dst.base(), op);
  MemOperand base = MemOperand(temp_base);
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      __ Stlrb(Register(src), base);
      break;
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
      __ Stlrh(Register(src), base);
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
      __ Stlr(Register(src), base);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      DCHECK(src.IsFPRegister());
      DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));

      Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
      __ Fmov(temp, FPRegister(src));
      __ Stlr(temp, base);
      break;
    }
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }
}

void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                       HInstruction* instruction,
                                       uint32_t dex_pc,
                                       SlowPathCode* slow_path) {
  BlockPoolsScope block_pools(GetVIXLAssembler());
  __ Ldr(lr, MemOperand(tr, entry_point_offset));
  __ Blr(lr);
  RecordPcInfo(instruction, dex_pc, slow_path);
  DCHECK(instruction->IsSuspendCheck()
         || instruction->IsBoundsCheck()
         || instruction->IsNullCheck()
         || instruction->IsDivZeroCheck()
         || !IsLeafMethod());
}
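
// Call sequence example (illustrative): for a call such as
// InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero), ...), the entry point
// offset names a slot in the per-thread table of quick entrypoints, so the
// emitted code is simply
//     ldr lr, [tr, #<offset of pThrowDivZero>]
//     blr lr
// followed by the stack-map record emitted by RecordPcInfo().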

void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                                                     vixl::Register class_reg) {
  UseScratchRegisterScope temps(GetVIXLAssembler());
  Register temp = temps.AcquireW();
  size_t status_offset = mirror::Class::StatusOffset().SizeValue();
  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();

  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
  if (use_acquire_release) {
    // TODO(vixl): Let the MacroAssembler handle MemOperand.
    __ Add(temp, class_reg, status_offset);
    __ Ldar(temp, HeapOperand(temp));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
  } else {
    __ Ldr(temp, HeapOperand(class_reg, status_offset));
    __ Cmp(temp, mirror::Class::kStatusInitialized);
    __ B(lt, slow_path->GetEntryLabel());
    __ Dmb(InnerShareable, BarrierReads);
  }
  __ Bind(slow_path->GetExitLabel());
}
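
// Ordering note (explanatory): both variants guarantee that reads of the
// class's static fields cannot be reordered before the status check. The
// acquire form gets this from Ldar's load-acquire semantics; the fallback
// uses a plain Ldr followed by a load barrier (dmb ishld), which is why that
// barrier is emitted even on the fast path where the class is initialized.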

void InstructionCodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
  BarrierType type = BarrierAll;

  switch (kind) {
    case MemBarrierKind::kAnyAny:
    case MemBarrierKind::kAnyStore: {
      type = BarrierAll;
      break;
    }
    case MemBarrierKind::kLoadAny: {
      type = BarrierReads;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      type = BarrierWrites;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ Dmb(InnerShareable, type);
}

void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
  SuspendCheckSlowPathARM64* slow_path =
      down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
  Register temp = temps.AcquireW();

  __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
  if (successor == nullptr) {
    __ Cbnz(temp, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Cbz(temp, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}
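
// Explanatory note: the Ldrh above reads the 16-bit thread flags out of the
// Thread's state-and-flags word. Zero means no suspend or checkpoint request
// is pending, so the fast path is a single load plus a compare-and-branch;
// any nonzero flag diverts to the slow path, which calls pTestSuspend.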
1179
1180InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
1181                                                             CodeGeneratorARM64* codegen)
1182      : HGraphVisitor(graph),
1183        assembler_(codegen->GetAssembler()),
1184        codegen_(codegen) {}
1185
1186#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)              \
1187  /* No unimplemented IR. */
1188
1189#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
1190
1191enum UnimplementedInstructionBreakCode {
1192  // Using a base helps identify when we hit such breakpoints.
1193  UnimplementedInstructionBreakCodeBaseCode = 0x900,
1194#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
1195  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
1196#undef ENUM_UNIMPLEMENTED_INSTRUCTION
1197};
1198
1199#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name)                               \
1200  void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) {                   \
1201    UNUSED(instr);                                                                    \
1202    __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name));                               \
1203  }                                                                                   \
1204  void LocationsBuilderARM64::Visit##name(H##name* instr) {                           \
1205    LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
1206    locations->SetOut(Location::Any());                                               \
1207  }
1208  FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
1209#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
1210
1211#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
1212#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
1213
1214void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
1215  DCHECK_EQ(instr->InputCount(), 2U);
1216  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1217  Primitive::Type type = instr->GetResultType();
1218  switch (type) {
1219    case Primitive::kPrimInt:
1220    case Primitive::kPrimLong:
1221      locations->SetInAt(0, Location::RequiresRegister());
1222      locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
1223      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1224      break;
1225
1226    case Primitive::kPrimFloat:
1227    case Primitive::kPrimDouble:
1228      locations->SetInAt(0, Location::RequiresFpuRegister());
1229      locations->SetInAt(1, Location::RequiresFpuRegister());
1230      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1231      break;
1232
1233    default:
1234      LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
1235  }
1236}
1237
1238void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
1239  LocationSummary* locations =
1240      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1241  locations->SetInAt(0, Location::RequiresRegister());
1242  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1243    locations->SetOut(Location::RequiresFpuRegister());
1244  } else {
1245    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1246  }
1247}
1248
1249void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
1250                                                   const FieldInfo& field_info) {
1251  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
1252  Primitive::Type field_type = field_info.GetFieldType();
1253  BlockPoolsScope block_pools(GetVIXLAssembler());
1254
1255  MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
1256  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
1257
1258  if (field_info.IsVolatile()) {
1259    if (use_acquire_release) {
1260      // NB: LoadAcquire will record the pc info if needed.
1261      codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
1262    } else {
1263      codegen_->Load(field_type, OutputCPURegister(instruction), field);
1264      codegen_->MaybeRecordImplicitNullCheck(instruction);
1265      // For IRIW sequential consistency kLoadAny is not sufficient.
1266      GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1267    }
1268  } else {
1269    codegen_->Load(field_type, OutputCPURegister(instruction), field);
1270    codegen_->MaybeRecordImplicitNullCheck(instruction);
1271  }
1272
1273  if (field_type == Primitive::kPrimNot) {
1274    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
1275  }
1276}
1277
1278void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
1279  LocationSummary* locations =
1280      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1281  locations->SetInAt(0, Location::RequiresRegister());
1282  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
1283    locations->SetInAt(1, Location::RequiresFpuRegister());
1284  } else {
1285    locations->SetInAt(1, Location::RequiresRegister());
1286  }
1287}
1288
1289void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
1290                                                   const FieldInfo& field_info,
1291                                                   bool value_can_be_null) {
1292  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
1293  BlockPoolsScope block_pools(GetVIXLAssembler());
1294
1295  Register obj = InputRegisterAt(instruction, 0);
1296  CPURegister value = InputCPURegisterAt(instruction, 1);
1297  CPURegister source = value;
1298  Offset offset = field_info.GetFieldOffset();
1299  Primitive::Type field_type = field_info.GetFieldType();
1300  bool use_acquire_release = codegen_->GetInstructionSetFeatures().PreferAcquireRelease();
1301
1302  {
1303    // We use a block to end the scratch scope before the write barrier, thus
1304    // freeing the temporary registers so they can be used in `MarkGCCard`.
1305    UseScratchRegisterScope temps(GetVIXLAssembler());
1306
1307    if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) {
1308      DCHECK(value.IsW());
1309      Register temp = temps.AcquireW();
1310      __ Mov(temp, value.W());
1311      GetAssembler()->PoisonHeapReference(temp.W());
1312      source = temp;
1313    }
1314
1315    if (field_info.IsVolatile()) {
1316      if (use_acquire_release) {
1317        codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset));
1318        codegen_->MaybeRecordImplicitNullCheck(instruction);
1319      } else {
1320        GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
1321        codegen_->Store(field_type, source, HeapOperand(obj, offset));
1322        codegen_->MaybeRecordImplicitNullCheck(instruction);
1323        GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
1324      }
1325    } else {
1326      codegen_->Store(field_type, source, HeapOperand(obj, offset));
1327      codegen_->MaybeRecordImplicitNullCheck(instruction);
1328    }
1329  }
1330
1331  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
1332    codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
1333  }
1334}
1335
1336void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
1337  Primitive::Type type = instr->GetType();
1338
1339  switch (type) {
1340    case Primitive::kPrimInt:
1341    case Primitive::kPrimLong: {
1342      Register dst = OutputRegister(instr);
1343      Register lhs = InputRegisterAt(instr, 0);
1344      Operand rhs = InputOperandAt(instr, 1);
1345      if (instr->IsAdd()) {
1346        __ Add(dst, lhs, rhs);
1347      } else if (instr->IsAnd()) {
1348        __ And(dst, lhs, rhs);
1349      } else if (instr->IsOr()) {
1350        __ Orr(dst, lhs, rhs);
1351      } else if (instr->IsSub()) {
1352        __ Sub(dst, lhs, rhs);
1353      } else {
1354        DCHECK(instr->IsXor());
1355        __ Eor(dst, lhs, rhs);
1356      }
1357      break;
1358    }
1359    case Primitive::kPrimFloat:
1360    case Primitive::kPrimDouble: {
1361      FPRegister dst = OutputFPRegister(instr);
1362      FPRegister lhs = InputFPRegisterAt(instr, 0);
1363      FPRegister rhs = InputFPRegisterAt(instr, 1);
1364      if (instr->IsAdd()) {
1365        __ Fadd(dst, lhs, rhs);
1366      } else if (instr->IsSub()) {
1367        __ Fsub(dst, lhs, rhs);
1368      } else {
1369        LOG(FATAL) << "Unexpected floating-point binary operation";
1370      }
1371      break;
1372    }
1373    default:
1374      LOG(FATAL) << "Unexpected binary operation type " << type;
1375  }
1376}
1377
1378void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
1379  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1380
1381  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1382  Primitive::Type type = instr->GetResultType();
1383  switch (type) {
1384    case Primitive::kPrimInt:
1385    case Primitive::kPrimLong: {
1386      locations->SetInAt(0, Location::RequiresRegister());
1387      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1388      locations->SetOut(Location::RequiresRegister());
1389      break;
1390    }
1391    default:
1392      LOG(FATAL) << "Unexpected shift type " << type;
1393  }
1394}
1395
1396void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
1397  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1398
1399  Primitive::Type type = instr->GetType();
1400  switch (type) {
1401    case Primitive::kPrimInt:
1402    case Primitive::kPrimLong: {
1403      Register dst = OutputRegister(instr);
1404      Register lhs = InputRegisterAt(instr, 0);
1405      Operand rhs = InputOperandAt(instr, 1);
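          // Java uses only the low 5 (int) or 6 (long) bits of the shift
          // amount, so immediates are masked below. Register shifts need no
          // mask: the AArch64 lsl/asr/lsr register forms already take the
          // amount modulo the register size.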
1406      if (rhs.IsImmediate()) {
1407        uint32_t shift_value = (type == Primitive::kPrimInt)
1408          ? static_cast<uint32_t>(rhs.immediate() & kMaxIntShiftValue)
1409          : static_cast<uint32_t>(rhs.immediate() & kMaxLongShiftValue);
1410        if (instr->IsShl()) {
1411          __ Lsl(dst, lhs, shift_value);
1412        } else if (instr->IsShr()) {
1413          __ Asr(dst, lhs, shift_value);
1414        } else {
1415          __ Lsr(dst, lhs, shift_value);
1416        }
1417      } else {
1418        Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1419
1420        if (instr->IsShl()) {
1421          __ Lsl(dst, lhs, rhs_reg);
1422        } else if (instr->IsShr()) {
1423          __ Asr(dst, lhs, rhs_reg);
1424        } else {
1425          __ Lsr(dst, lhs, rhs_reg);
1426        }
1427      }
1428      break;
1429    }
1430    default:
1431      LOG(FATAL) << "Unexpected shift operation type " << type;
1432  }
1433}
1434
1435void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1436  HandleBinaryOp(instruction);
1437}
1438
1439void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1440  HandleBinaryOp(instruction);
1441}
1442
1443void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1444  HandleBinaryOp(instruction);
1445}
1446
1447void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1448  HandleBinaryOp(instruction);
1449}
1450
1451void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
1452  LocationSummary* locations =
1453      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1454  locations->SetInAt(0, Location::RequiresRegister());
1455  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1456  if (Primitive::IsFloatingPointType(instruction->GetType())) {
1457    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1458  } else {
1459    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1460  }
1461}
1462
1463void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
1464  LocationSummary* locations = instruction->GetLocations();
1465  Primitive::Type type = instruction->GetType();
1466  Register obj = InputRegisterAt(instruction, 0);
1467  Location index = locations->InAt(1);
1468  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
1469  MemOperand source = HeapOperand(obj);
1470  MacroAssembler* masm = GetVIXLAssembler();
1471  UseScratchRegisterScope temps(masm);
1472  BlockPoolsScope block_pools(masm);
1473
1474  if (index.IsConstant()) {
1475    offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
1476    source = HeapOperand(obj, offset);
1477  } else {
1478    Register temp = temps.AcquireSameSizeAs(obj);
1479    Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
1480    __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
1481    source = HeapOperand(temp, offset);
1482  }
1483
1484  codegen_->Load(type, OutputCPURegister(instruction), source);
1485  codegen_->MaybeRecordImplicitNullCheck(instruction);
1486
1487  if (type == Primitive::kPrimNot) {
1488    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
1489  }
1490}
1491
1492void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
1493  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1494  locations->SetInAt(0, Location::RequiresRegister());
1495  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1496}
1497
1498void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
1499  BlockPoolsScope block_pools(GetVIXLAssembler());
1500  __ Ldr(OutputRegister(instruction),
1501         HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
1502  codegen_->MaybeRecordImplicitNullCheck(instruction);
1503}
1504
1505void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
1506  if (instruction->NeedsTypeCheck()) {
1507    LocationSummary* locations =
1508        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
1509    InvokeRuntimeCallingConvention calling_convention;
1510    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
1511    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
1512    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
1513  } else {
1514    LocationSummary* locations =
1515        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1516    locations->SetInAt(0, Location::RequiresRegister());
1517    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1518    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1519      locations->SetInAt(2, Location::RequiresFpuRegister());
1520    } else {
1521      locations->SetInAt(2, Location::RequiresRegister());
1522    }
1523  }
1524}
1525
1526void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
1527  Primitive::Type value_type = instruction->GetComponentType();
1528  LocationSummary* locations = instruction->GetLocations();
1529  bool needs_runtime_call = locations->WillCall();
1530
1531  if (needs_runtime_call) {
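        // The pAputObject entrypoint performs the component type check, the
        // store and the card-marking write barrier.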
1532    // Note: if heap poisoning is enabled, pAputObject takes care
1533    // of poisoning the reference.
1534    codegen_->InvokeRuntime(
1535        QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
1536    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
1537  } else {
1538    Register obj = InputRegisterAt(instruction, 0);
1539    CPURegister value = InputCPURegisterAt(instruction, 2);
1540    CPURegister source = value;
1541    Location index = locations->InAt(1);
1542    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
1543    MemOperand destination = HeapOperand(obj);
1544    MacroAssembler* masm = GetVIXLAssembler();
1545    BlockPoolsScope block_pools(masm);
1546    {
1547      // We use a block to end the scratch scope before the write barrier, thus
1548      // freeing the temporary registers so they can be used in `MarkGCCard`.
1549      UseScratchRegisterScope temps(masm);
1550
1551      if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
1552        DCHECK(value.IsW());
1553        Register temp = temps.AcquireW();
1554        __ Mov(temp, value.W());
1555        GetAssembler()->PoisonHeapReference(temp.W());
1556        source = temp;
1557      }
1558
1559      if (index.IsConstant()) {
1560        offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
1561        destination = HeapOperand(obj, offset);
1562      } else {
1563        Register temp = temps.AcquireSameSizeAs(obj);
1564        Register index_reg = InputRegisterAt(instruction, 1);
1565        __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
1566        destination = HeapOperand(temp, offset);
1567      }
1568
1569      codegen_->Store(value_type, source, destination);
1570      codegen_->MaybeRecordImplicitNullCheck(instruction);
1571    }
1572    if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
1573      codegen_->MarkGCCard(obj, value.W(), instruction->GetValueCanBeNull());
1574    }
1575  }
1576}
1577
1578void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1579  LocationSummary* locations =
1580      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1581  locations->SetInAt(0, Location::RequiresRegister());
1582  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1583  if (instruction->HasUses()) {
1584    locations->SetOut(Location::SameAsFirstInput());
1585  }
1586}
1587
1588void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
1589  LocationSummary* locations = instruction->GetLocations();
1590  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
1591      instruction, locations->InAt(0), locations->InAt(1));
1592  codegen_->AddSlowPath(slow_path);
1593
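      // A single unsigned comparison suffices: a negative index, seen as
      // unsigned, is larger than any valid length, so `hs` catches both
      // index < 0 and index >= length.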
1594  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
1595  __ B(slow_path->GetEntryLabel(), hs);
1596}
1597
1598void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
1599  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1600      instruction, LocationSummary::kCallOnSlowPath);
1601  locations->SetInAt(0, Location::RequiresRegister());
1602  locations->SetInAt(1, Location::RequiresRegister());
1603  locations->AddTemp(Location::RequiresRegister());
1604}
1605
1606void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
1607  LocationSummary* locations = instruction->GetLocations();
1608  Register obj = InputRegisterAt(instruction, 0);
1609  Register cls = InputRegisterAt(instruction, 1);
1610  Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
1611
1612  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
1613      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
1614  codegen_->AddSlowPath(slow_path);
1615
1616  // Avoid null check if we know obj is not null.
1617  if (instruction->MustDoNullCheck()) {
1618    __ Cbz(obj, slow_path->GetExitLabel());
1619  }
1620  // Compare the class of `obj` with `cls`.
1621  __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
1622  GetAssembler()->MaybeUnpoisonHeapReference(obj_cls.W());
1623  __ Cmp(obj_cls, cls);
1624  // The checkcast succeeds if the classes are equal (fast path).
1625  // Otherwise, we need to go into the slow path to check the types.
1626  __ B(ne, slow_path->GetEntryLabel());
1627  __ Bind(slow_path->GetExitLabel());
1628}
1629
1630void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
1631  LocationSummary* locations =
1632      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1633  locations->SetInAt(0, Location::RequiresRegister());
1634  if (check->HasUses()) {
1635    locations->SetOut(Location::SameAsFirstInput());
1636  }
1637}
1638
1639void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
1640  // We assume the class is not null.
1641  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
1642      check->GetLoadClass(), check, check->GetDexPc(), true);
1643  codegen_->AddSlowPath(slow_path);
1644  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
1645}
1646
1647static bool IsFloatingPointZeroConstant(HInstruction* instruction) {
1648  return (instruction->IsFloatConstant() && (instruction->AsFloatConstant()->GetValue() == 0.0f))
1649      || (instruction->IsDoubleConstant() && (instruction->AsDoubleConstant()->GetValue() == 0.0));
1650}
1651
1652void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
1653  LocationSummary* locations =
1654      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
1655  Primitive::Type in_type = compare->InputAt(0)->GetType();
1656  switch (in_type) {
1657    case Primitive::kPrimLong: {
1658      locations->SetInAt(0, Location::RequiresRegister());
1659      locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
1660      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1661      break;
1662    }
1663    case Primitive::kPrimFloat:
1664    case Primitive::kPrimDouble: {
1665      locations->SetInAt(0, Location::RequiresFpuRegister());
1666      locations->SetInAt(1,
1667                         IsFloatingPointZeroConstant(compare->InputAt(1))
1668                             ? Location::ConstantLocation(compare->InputAt(1)->AsConstant())
1669                             : Location::RequiresFpuRegister());
1670      locations->SetOut(Location::RequiresRegister());
1671      break;
1672    }
1673    default:
1674      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1675  }
1676}
1677
1678void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
1679  Primitive::Type in_type = compare->InputAt(0)->GetType();
1680
1681  //  0 if: left == right
1682  //  1 if: left  > right
1683  // -1 if: left  < right
1684  switch (in_type) {
1685    case Primitive::kPrimLong: {
1686      Register result = OutputRegister(compare);
1687      Register left = InputRegisterAt(compare, 0);
1688      Operand right = InputOperandAt(compare, 1);
1689
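          // cset leaves 0 when equal and 1 otherwise; cneg then turns the 1
          // into -1 when left < right, yielding the required {-1, 0, 1}.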
1690      __ Cmp(left, right);
1691      __ Cset(result, ne);
1692      __ Cneg(result, result, lt);
1693      break;
1694    }
1695    case Primitive::kPrimFloat:
1696    case Primitive::kPrimDouble: {
1697      Register result = OutputRegister(compare);
1698      FPRegister left = InputFPRegisterAt(compare, 0);
1699      if (compare->GetLocations()->InAt(1).IsConstant()) {
1700        DCHECK(IsFloatingPointZeroConstant(compare->GetLocations()->InAt(1).GetConstant()));
1701        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
1702        __ Fcmp(left, 0.0);
1703      } else {
1704        __ Fcmp(left, InputFPRegisterAt(compare, 1));
1705      }
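          // Map the flags to {-1, 0, 1}, with NaN (unordered) resolving to
          // the bias: for gt-bias, cset ne yields 1 for unordered and cneg on
          // mi (ordered less-than only) leaves it alone, so NaN gives 1; for
          // lt-bias, csetm ne yields -1 for unordered and cneg on gt (ordered
          // greater-than only) leaves it alone, so NaN gives -1.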
1706      if (compare->IsGtBias()) {
1707        __ Cset(result, ne);
1708      } else {
1709        __ Csetm(result, ne);
1710      }
1711      __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
1712      break;
1713    }
1714    default:
1715      LOG(FATAL) << "Unimplemented compare type " << in_type;
1716  }
1717}
1718
1719void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
1720  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1721
1722  if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
1723    locations->SetInAt(0, Location::RequiresFpuRegister());
1724    locations->SetInAt(1,
1725                       IsFloatingPointZeroConstant(instruction->InputAt(1))
1726                           ? Location::ConstantLocation(instruction->InputAt(1)->AsConstant())
1727                           : Location::RequiresFpuRegister());
1728  } else {
1729    // Integer cases.
1730    locations->SetInAt(0, Location::RequiresRegister());
1731    locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
1732  }
1733
1734  if (instruction->NeedsMaterialization()) {
1735    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1736  }
1737}
1738
1739void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
1740  if (!instruction->NeedsMaterialization()) {
1741    return;
1742  }
1743
1744  LocationSummary* locations = instruction->GetLocations();
1745  Register res = RegisterFrom(locations->Out(), instruction->GetType());
1746  IfCondition if_cond = instruction->GetCondition();
1747  Condition arm64_cond = ARM64Condition(if_cond);
1748
1749  if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
1750    FPRegister lhs = InputFPRegisterAt(instruction, 0);
1751    if (locations->InAt(1).IsConstant()) {
1752      DCHECK(IsFloatingPointZeroConstant(locations->InAt(1).GetConstant()));
1753      // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
1754      __ Fcmp(lhs, 0.0);
1755    } else {
1756      __ Fcmp(lhs, InputFPRegisterAt(instruction, 1));
1757    }
1758    __ Cset(res, arm64_cond);
1759    if (instruction->IsFPConditionTrueIfNaN()) {
1760      __ Csel(res, res, Operand(1), vs);  // VS for unordered.
1761    } else if (instruction->IsFPConditionFalseIfNaN()) {
1762      __ Csel(res, res, Operand(0), vs);  // VS for unordered.
1763    }
1764  } else {
1765    // Integer cases.
1766    Register lhs = InputRegisterAt(instruction, 0);
1767    Operand rhs = InputOperandAt(instruction, 1);
1768    __ Cmp(lhs, rhs);
1769    __ Cset(res, arm64_cond);
1770  }
1771}
1772
1773#define FOR_EACH_CONDITION_INSTRUCTION(M)                                                \
1774  M(Equal)                                                                               \
1775  M(NotEqual)                                                                            \
1776  M(LessThan)                                                                            \
1777  M(LessThanOrEqual)                                                                     \
1778  M(GreaterThan)                                                                         \
1779  M(GreaterThanOrEqual)
1780#define DEFINE_CONDITION_VISITORS(Name)                                                  \
1781void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }         \
1782void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
1783FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
1784#undef DEFINE_CONDITION_VISITORS
1785#undef FOR_EACH_CONDITION_INSTRUCTION
1786
1787void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
1788  DCHECK(instruction->IsDiv() || instruction->IsRem());
1789
1790  LocationSummary* locations = instruction->GetLocations();
1791  Location second = locations->InAt(1);
1792  DCHECK(second.IsConstant());
1793
1794  Register out = OutputRegister(instruction);
1795  Register dividend = InputRegisterAt(instruction, 0);
1796  int64_t imm = Int64FromConstant(second.GetConstant());
1797  DCHECK(imm == 1 || imm == -1);
1798
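      // No division is needed: x / 1 == x, x / -1 == -x, and x % (+/-1) == 0.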
1799  if (instruction->IsRem()) {
1800    __ Mov(out, 0);
1801  } else {
1802    if (imm == 1) {
1803      __ Mov(out, dividend);
1804    } else {
1805      __ Neg(out, dividend);
1806    }
1807  }
1808}
1809
1810void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
1811  DCHECK(instruction->IsDiv() || instruction->IsRem());
1812
1813  LocationSummary* locations = instruction->GetLocations();
1814  Location second = locations->InAt(1);
1815  DCHECK(second.IsConstant());
1816
1817  Register out = OutputRegister(instruction);
1818  Register dividend = InputRegisterAt(instruction, 0);
1819  int64_t imm = Int64FromConstant(second.GetConstant());
1820  uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
1821  DCHECK(IsPowerOfTwo(abs_imm));
1822  int ctz_imm = CTZ(abs_imm);
1823
1824  UseScratchRegisterScope temps(GetVIXLAssembler());
1825  Register temp = temps.AcquireSameSizeAs(out);
1826
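      // Java division truncates toward zero while an arithmetic shift rounds
      // toward negative infinity, so negative dividends are biased by
      // (abs_imm - 1) before shifting, e.g. -7 / 4: (-7 + 3) >> 2 == -1.
      // The remainder path uses the matching bias/mask/unbias sequence,
      // e.g. -7 % 4: ((-7 + 3) & 3) - 3 == -3.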
1827  if (instruction->IsDiv()) {
1828    __ Add(temp, dividend, abs_imm - 1);
1829    __ Cmp(dividend, 0);
1830    __ Csel(out, temp, dividend, lt);
1831    if (imm > 0) {
1832      __ Asr(out, out, ctz_imm);
1833    } else {
1834      __ Neg(out, Operand(out, ASR, ctz_imm));
1835    }
1836  } else {
1837    int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64;
1838    __ Asr(temp, dividend, bits - 1);
1839    __ Lsr(temp, temp, bits - ctz_imm);
1840    __ Add(out, dividend, temp);
1841    __ And(out, out, abs_imm - 1);
1842    __ Sub(out, out, temp);
1843  }
1844}
1845
1846void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
1847  DCHECK(instruction->IsDiv() || instruction->IsRem());
1848
1849  LocationSummary* locations = instruction->GetLocations();
1850  Location second = locations->InAt(1);
1851  DCHECK(second.IsConstant());
1852
1853  Register out = OutputRegister(instruction);
1854  Register dividend = InputRegisterAt(instruction, 0);
1855  int64_t imm = Int64FromConstant(second.GetConstant());
1856
1857  Primitive::Type type = instruction->GetResultType();
1858  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1859
1860  int64_t magic;
1861  int shift;
1862  CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift);
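      // Division by multiplication (cf. Hacker's Delight 10-4): the quotient
      // is approximated by the high half of dividend * magic, corrected below
      // for the signs of magic and imm, shifted, then rounded toward zero by
      // adding 1 to negative results (the final `Sub` of the sign extension).
      // For example, int division by 7 uses magic 0x92492493 with shift 2.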
1863
1864  UseScratchRegisterScope temps(GetVIXLAssembler());
1865  Register temp = temps.AcquireSameSizeAs(out);
1866
1867  // temp = get_high(dividend * magic)
1868  __ Mov(temp, magic);
1869  if (type == Primitive::kPrimLong) {
1870    __ Smulh(temp, dividend, temp);
1871  } else {
1872    __ Smull(temp.X(), dividend, temp);
1873    __ Lsr(temp.X(), temp.X(), 32);
1874  }
1875
1876  if (imm > 0 && magic < 0) {
1877    __ Add(temp, temp, dividend);
1878  } else if (imm < 0 && magic > 0) {
1879    __ Sub(temp, temp, dividend);
1880  }
1881
1882  if (shift != 0) {
1883    __ Asr(temp, temp, shift);
1884  }
1885
1886  if (instruction->IsDiv()) {
1887    __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
1888  } else {
1889    __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
1890    // TODO: Strength reduction for msub.
1891    Register temp_imm = temps.AcquireSameSizeAs(out);
1892    __ Mov(temp_imm, imm);
1893    __ Msub(out, temp, temp_imm, dividend);
1894  }
1895}
1896
1897void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
1898  DCHECK(instruction->IsDiv() || instruction->IsRem());
1899  Primitive::Type type = instruction->GetResultType();
1900  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1901
1902  LocationSummary* locations = instruction->GetLocations();
1903  Register out = OutputRegister(instruction);
1904  Location second = locations->InAt(1);
1905
1906  if (second.IsConstant()) {
1907    int64_t imm = Int64FromConstant(second.GetConstant());
1908
1909    if (imm == 0) {
1910      // Do not generate anything. DivZeroCheck would prevent any code from being executed.
1911    } else if (imm == 1 || imm == -1) {
1912      DivRemOneOrMinusOne(instruction);
1913    } else if (IsPowerOfTwo(std::abs(imm))) {
1914      DivRemByPowerOfTwo(instruction);
1915    } else {
1916      DCHECK(imm <= -2 || imm >= 2);
1917      GenerateDivRemWithAnyConstant(instruction);
1918    }
1919  } else {
1920    Register dividend = InputRegisterAt(instruction, 0);
1921    Register divisor = InputRegisterAt(instruction, 1);
1922    if (instruction->IsDiv()) {
1923      __ Sdiv(out, dividend, divisor);
1924    } else {
1925      UseScratchRegisterScope temps(GetVIXLAssembler());
1926      Register temp = temps.AcquireSameSizeAs(out);
1927      __ Sdiv(temp, dividend, divisor);
1928      __ Msub(out, temp, divisor, dividend);
1929    }
1930  }
1931}
1932
1933void LocationsBuilderARM64::VisitDiv(HDiv* div) {
1934  LocationSummary* locations =
1935      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1936  switch (div->GetResultType()) {
1937    case Primitive::kPrimInt:
1938    case Primitive::kPrimLong:
1939      locations->SetInAt(0, Location::RequiresRegister());
1940      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
1941      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1942      break;
1943
1944    case Primitive::kPrimFloat:
1945    case Primitive::kPrimDouble:
1946      locations->SetInAt(0, Location::RequiresFpuRegister());
1947      locations->SetInAt(1, Location::RequiresFpuRegister());
1948      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1949      break;
1950
1951    default:
1952      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1953  }
1954}
1955
1956void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
1957  Primitive::Type type = div->GetResultType();
1958  switch (type) {
1959    case Primitive::kPrimInt:
1960    case Primitive::kPrimLong:
1961      GenerateDivRemIntegral(div);
1962      break;
1963
1964    case Primitive::kPrimFloat:
1965    case Primitive::kPrimDouble:
1966      __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
1967      break;
1968
1969    default:
1970      LOG(FATAL) << "Unexpected div type " << type;
1971  }
1972}
1973
1974void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1975  LocationSummary* locations =
1976      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1977  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1978  if (instruction->HasUses()) {
1979    locations->SetOut(Location::SameAsFirstInput());
1980  }
1981}
1982
1983void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1984  SlowPathCodeARM64* slow_path =
1985      new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
1986  codegen_->AddSlowPath(slow_path);
1987  Location value = instruction->GetLocations()->InAt(0);
1988
1989  Primitive::Type type = instruction->GetType();
1990
1991  if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
1992    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1993    return;
1994  }
1995
1996  if (value.IsConstant()) {
1997    int64_t divisor = Int64ConstantFrom(value);
1998    if (divisor == 0) {
1999      __ B(slow_path->GetEntryLabel());
2000    } else {
2001      // A division by a non-zero constant is valid. We don't need to perform
2002      // any check, so simply fall through.
2003    }
2004  } else {
2005    __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
2006  }
2007}
2008
2009void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
2010  LocationSummary* locations =
2011      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2012  locations->SetOut(Location::ConstantLocation(constant));
2013}
2014
2015void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
2016  UNUSED(constant);
2017  // Will be generated at use site.
2018}
2019
2020void LocationsBuilderARM64::VisitExit(HExit* exit) {
2021  exit->SetLocations(nullptr);
2022}
2023
2024void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
2025  UNUSED(exit);
2026}
2027
2028void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
2029  LocationSummary* locations =
2030      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2031  locations->SetOut(Location::ConstantLocation(constant));
2032}
2033
2034void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
2035  UNUSED(constant);
2036  // Will be generated at use site.
2037}
2038
2039void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
2040  DCHECK(!successor->IsExitBlock());
2041  HBasicBlock* block = got->GetBlock();
2042  HInstruction* previous = got->GetPrevious();
2043  HLoopInformation* info = block->GetLoopInformation();
2044
2045  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
2046    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
2047    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
2048    return;
2049  }
2050  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
2051    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
2052  }
2053  if (!codegen_->GoesToNextBlock(block, successor)) {
2054    __ B(codegen_->GetLabelOf(successor));
2055  }
2056}
2057
2058void LocationsBuilderARM64::VisitGoto(HGoto* got) {
2059  got->SetLocations(nullptr);
2060}
2061
2062void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
2063  HandleGoto(got, got->GetSuccessor());
2064}
2065
2066void LocationsBuilderARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2067  try_boundary->SetLocations(nullptr);
2068}
2069
2070void InstructionCodeGeneratorARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2071  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
2072  if (!successor->IsExitBlock()) {
2073    HandleGoto(try_boundary, successor);
2074  }
2075}
2076
2077void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
2078                                                          vixl::Label* true_target,
2079                                                          vixl::Label* false_target,
2080                                                          vixl::Label* always_true_target) {
2081  HInstruction* cond = instruction->InputAt(0);
2082  HCondition* condition = cond->AsCondition();
2083
2084  if (cond->IsIntConstant()) {
2085    int32_t cond_value = cond->AsIntConstant()->GetValue();
2086    if (cond_value == 1) {
2087      if (always_true_target != nullptr) {
2088        __ B(always_true_target);
2089      }
2090      return;
2091    } else {
2092      DCHECK_EQ(cond_value, 0);
2093    }
2094  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
2095    // The condition instruction has been materialized, compare the output to 0.
2096    Location cond_val = instruction->GetLocations()->InAt(0);
2097    DCHECK(cond_val.IsRegister());
2098    __ Cbnz(InputRegisterAt(instruction, 0), true_target);
2099  } else {
2100    // The condition instruction has not been materialized, use its inputs as
2101    // the comparison and its condition as the branch condition.
2102    Primitive::Type type =
2103        cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
2104
2105    if (Primitive::IsFloatingPointType(type)) {
2106      // The FP code below needs a non-null false_target: the NaN case may branch to it.
2107      if (false_target == nullptr) {
2108        false_target = codegen_->GetLabelOf(instruction->AsIf()->IfFalseSuccessor());
2109      }
2110      FPRegister lhs = InputFPRegisterAt(condition, 0);
2111      if (condition->GetLocations()->InAt(1).IsConstant()) {
2112        DCHECK(IsFloatingPointZeroConstant(condition->GetLocations()->InAt(1).GetConstant()));
2113        // 0.0 is the only immediate that can be encoded directly in an FCMP instruction.
2114        __ Fcmp(lhs, 0.0);
2115      } else {
2116        __ Fcmp(lhs, InputFPRegisterAt(condition, 1));
2117      }
2118      if (condition->IsFPConditionTrueIfNaN()) {
2119        __ B(vs, true_target);  // VS for unordered.
2120      } else if (condition->IsFPConditionFalseIfNaN()) {
2121        __ B(vs, false_target);  // VS for unordered.
2122      }
2123      __ B(ARM64Condition(condition->GetCondition()), true_target);
2124    } else {
2125      // Integer cases.
2126      Register lhs = InputRegisterAt(condition, 0);
2127      Operand rhs = InputOperandAt(condition, 1);
2128      Condition arm64_cond = ARM64Condition(condition->GetCondition());
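          // Comparisons against zero can use a single cbz/cbnz/tbz/tbnz
          // instead of cmp + b.cond; gt and le are excluded because they need
          // both the zero and sign information, which no single such test
          // provides.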
2129      if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
2130        switch (arm64_cond) {
2131          case eq:
2132            __ Cbz(lhs, true_target);
2133            break;
2134          case ne:
2135            __ Cbnz(lhs, true_target);
2136            break;
2137          case lt:
2138            // Test the sign bit and branch accordingly.
2139            __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
2140            break;
2141          case ge:
2142            // Test the sign bit and branch accordingly.
2143            __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target);
2144            break;
2145          default:
2146            // Without the `static_cast` the compiler throws an error for
2147            // `-Werror=sign-promo`.
2148            LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond);
2149        }
2150      } else {
2151        __ Cmp(lhs, rhs);
2152        __ B(arm64_cond, true_target);
2153      }
2154    }
2155  }
2156  if (false_target != nullptr) {
2157    __ B(false_target);
2158  }
2159}
2160
2161void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
2162  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2163  HInstruction* cond = if_instr->InputAt(0);
2164  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2165    locations->SetInAt(0, Location::RequiresRegister());
2166  }
2167}
2168
2169void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
2170  vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2171  vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2172  vixl::Label* always_true_target = true_target;
2173  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2174                                if_instr->IfTrueSuccessor())) {
2175    always_true_target = nullptr;
2176  }
2177  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2178                                if_instr->IfFalseSuccessor())) {
2179    false_target = nullptr;
2180  }
2181  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2182}
2183
2184void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2185  LocationSummary* locations = new (GetGraph()->GetArena())
2186      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2187  HInstruction* cond = deoptimize->InputAt(0);
2188  DCHECK(cond->IsCondition());
2189  if (cond->AsCondition()->NeedsMaterialization()) {
2190    locations->SetInAt(0, Location::RequiresRegister());
2191  }
2192}
2193
2194void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2195  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
2196      DeoptimizationSlowPathARM64(deoptimize);
2197  codegen_->AddSlowPath(slow_path);
2198  vixl::Label* slow_path_entry = slow_path->GetEntryLabel();
2199  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2200}
2201
2202void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2203  HandleFieldGet(instruction);
2204}
2205
2206void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2207  HandleFieldGet(instruction, instruction->GetFieldInfo());
2208}
2209
2210void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2211  HandleFieldSet(instruction);
2212}
2213
2214void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2215  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
2216}
2217
2218void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
2219  LocationSummary::CallKind call_kind =
2220      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
2221  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2222  locations->SetInAt(0, Location::RequiresRegister());
2223  locations->SetInAt(1, Location::RequiresRegister());
2224  // The output overlaps the inputs: `out` is written before the last use of the inputs.
2225  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2226}
2227
2228void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
2229  LocationSummary* locations = instruction->GetLocations();
2230  Register obj = InputRegisterAt(instruction, 0);
2231  Register cls = InputRegisterAt(instruction, 1);
2232  Register out = OutputRegister(instruction);
2233
2234  vixl::Label done;
2235
2236  // Return 0 if `obj` is null.
2237  // Avoid null check if we know `obj` is not null.
2238  if (instruction->MustDoNullCheck()) {
2239    __ Mov(out, 0);
2240    __ Cbz(obj, &done);
2241  }
2242
2243  // Compare the class of `obj` with `cls`.
2244  __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
2245  GetAssembler()->MaybeUnpoisonHeapReference(out.W());
2246  __ Cmp(out, cls);
2247  if (instruction->IsClassFinal()) {
2248    // Classes must be equal for the instanceof to succeed.
2249    __ Cset(out, eq);
2250  } else {
2251    // If the classes are not equal, we go into a slow path.
2252    DCHECK(locations->OnlyCallsOnSlowPath());
2253    SlowPathCodeARM64* slow_path =
2254        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
2255        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
2256    codegen_->AddSlowPath(slow_path);
2257    __ B(ne, slow_path->GetEntryLabel());
2258    __ Mov(out, 1);
2259    __ Bind(slow_path->GetExitLabel());
2260  }
2261
2262  __ Bind(&done);
2263}
2264
2265void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
2266  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2267  locations->SetOut(Location::ConstantLocation(constant));
2268}
2269
2270void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
2271  // Will be generated at use site.
2272  UNUSED(constant);
2273}
2274
2275void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
2276  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2277  locations->SetOut(Location::ConstantLocation(constant));
2278}
2279
2280void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
2281  // Will be generated at use site.
2282  UNUSED(constant);
2283}
2284
2285void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
2286  InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
2287  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2288}
2289
2290void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2291  HandleInvoke(invoke);
2292}
2293
2294void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
2295  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2296  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
2297  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2298      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
2299  Location receiver = invoke->GetLocations()->InAt(0);
2300  Offset class_offset = mirror::Object::ClassOffset();
2301  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2302
2303  // The register ip1 is required to be used for the hidden argument in
2304  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
2305  MacroAssembler* masm = GetVIXLAssembler();
2306  UseScratchRegisterScope scratch_scope(masm);
2307  BlockPoolsScope block_pools(masm);
2308  scratch_scope.Exclude(ip1);
2309  __ Mov(ip1, invoke->GetDexMethodIndex());
2310
2311  // temp = object->GetClass();
2312  if (receiver.IsStackSlot()) {
2313    __ Ldr(temp.W(), StackOperandFrom(receiver));
2314    __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
2315  } else {
2316    __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
2317  }
2318  codegen_->MaybeRecordImplicitNullCheck(invoke);
2319  GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
2320  // temp = temp->GetImtEntryAt(method_offset);
2321  __ Ldr(temp, MemOperand(temp, method_offset));
2322  // lr = temp->GetEntryPoint();
2323  __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
2324  // lr();
2325  __ Blr(lr);
2326  DCHECK(!codegen_->IsLeafMethod());
2327  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2328}
2329
2330void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2331  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2332  if (intrinsic.TryDispatch(invoke)) {
2333    return;
2334  }
2335
2336  HandleInvoke(invoke);
2337}
2338
2339void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2340  // When we do not run baseline, explicit clinit checks triggered by static
2341  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2342  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2343
2344  IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
2345  if (intrinsic.TryDispatch(invoke)) {
2346    return;
2347  }
2348
2349  HandleInvoke(invoke);
2350}
2351
2352static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
2353  if (invoke->GetLocations()->Intrinsified()) {
2354    IntrinsicCodeGeneratorARM64 intrinsic(codegen);
2355    intrinsic.Dispatch(invoke);
2356    return true;
2357  }
2358  return false;
2359}
2360
2361void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2362  // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
2363  size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex());
2364
2365  // TODO: Implement all kinds of calls:
2366  // 1) boot -> boot
2367  // 2) app -> boot
2368  // 3) app -> app
2369  //
2370  // Currently we implement the app -> app logic, which looks up in the resolve cache.
2371
2372  if (invoke->IsStringInit()) {
2373    Register reg = XRegisterFrom(temp);
2374    // temp = thread->string_init_entrypoint
2375    __ Ldr(reg.X(), MemOperand(tr, invoke->GetStringInitOffset()));
2376    // LR = temp->entry_point_from_quick_compiled_code_;
2377    __ Ldr(lr, MemOperand(
2378        reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
2379    // lr()
2380    __ Blr(lr);
2381  } else if (invoke->IsRecursive()) {
2382    __ Bl(&frame_entry_label_);
2383  } else {
2384    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2385    Register reg = XRegisterFrom(temp);
2386    Register method_reg;
2387    if (current_method.IsRegister()) {
2388      method_reg = XRegisterFrom(current_method);
2389    } else {
2390      DCHECK(invoke->GetLocations()->Intrinsified());
2391      DCHECK(!current_method.IsValid());
2392      method_reg = reg;
2393      __ Ldr(reg.X(), MemOperand(sp, kCurrentMethodStackOffset));
2394    }
2395
2396    // temp = current_method->dex_cache_resolved_methods_;
2397    __ Ldr(reg.W(), MemOperand(method_reg.X(),
2398                               ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
2399    // temp = temp[index_in_cache];
2400    __ Ldr(reg.X(), MemOperand(reg, index_in_cache));
2401    // lr = temp->entry_point_from_quick_compiled_code_;
2402    __ Ldr(lr, MemOperand(reg.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2403        kArm64WordSize).Int32Value()));
2404    // lr();
2405    __ Blr(lr);
2406  }
2407
2408  DCHECK(!IsLeafMethod());
2409}
2410
2411void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2412  // When we do not run baseline, explicit clinit checks triggered by static
2413  // invokes must have been pruned by art::PrepareForRegisterAllocation.
2414  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2415
2416  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2417    return;
2418  }
2419
2420  BlockPoolsScope block_pools(GetVIXLAssembler());
2421  LocationSummary* locations = invoke->GetLocations();
2422  codegen_->GenerateStaticOrDirectCall(
2423      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
2424  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2425}
2426
2427void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2428  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2429    return;
2430  }
2431
2432  LocationSummary* locations = invoke->GetLocations();
2433  Location receiver = locations->InAt(0);
2434  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
2435  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2436      invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
2437  Offset class_offset = mirror::Object::ClassOffset();
2438  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
2439
2440  BlockPoolsScope block_pools(GetVIXLAssembler());
2441
2442  DCHECK(receiver.IsRegister());
2443  __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
2444  codegen_->MaybeRecordImplicitNullCheck(invoke);
2445  GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
2446  // temp = temp->GetMethodAt(method_offset);
2447  __ Ldr(temp, MemOperand(temp, method_offset));
2448  // lr = temp->GetEntryPoint();
2449  __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
2450  // lr();
2451  __ Blr(lr);
2452  DCHECK(!codegen_->IsLeafMethod());
2453  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2454}
2455
2456void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
2457  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2458                                                              : LocationSummary::kNoCall;
2459  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2460  locations->SetInAt(0, Location::RequiresRegister());
2461  locations->SetOut(Location::RequiresRegister());
2462}
2463
2464void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
2465  Register out = OutputRegister(cls);
2466  Register current_method = InputRegisterAt(cls, 0);
2467  if (cls->IsReferrersClass()) {
2468    DCHECK(!cls->CanCallRuntime());
2469    DCHECK(!cls->MustGenerateClinitCheck());
2470    __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
2471  } else {
2472    DCHECK(cls->CanCallRuntime());
2473    __ Ldr(out, MemOperand(current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
2474    __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
2475    GetAssembler()->MaybeUnpoisonHeapReference(out.W());
2476
2477    SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
2478        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
2479    codegen_->AddSlowPath(slow_path);
2480    __ Cbz(out, slow_path->GetEntryLabel());
2481    if (cls->MustGenerateClinitCheck()) {
2482      GenerateClassInitializationCheck(slow_path, out);
2483    } else {
2484      __ Bind(slow_path->GetExitLabel());
2485    }
2486  }
2487}
2488
2489void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
2490  LocationSummary* locations =
2491      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2492  locations->SetOut(Location::RequiresRegister());
2493}
2494
2495void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
2496  MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
2497  __ Ldr(OutputRegister(instruction), exception);
2498  __ Str(wzr, exception);
2499}
2500
2501void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
2502  load->SetLocations(nullptr);
2503}
2504
2505void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
2506  // Nothing to do, this is driven by the code generator.
2507  UNUSED(load);
2508}
2509
2510void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
2511  LocationSummary* locations =
2512      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2513  locations->SetInAt(0, Location::RequiresRegister());
2514  locations->SetOut(Location::RequiresRegister());
2515}
2516
2517void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
2518  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
2519  codegen_->AddSlowPath(slow_path);
2520
2521  Register out = OutputRegister(load);
2522  Register current_method = InputRegisterAt(load, 0);
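      // Chase current method -> declaring class -> dex cache strings array ->
      // entry; a null entry means the String is not resolved yet, so defer to
      // the slow path.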
2523  __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
2524  __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
2525  GetAssembler()->MaybeUnpoisonHeapReference(out.W());
2526  __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
2527  GetAssembler()->MaybeUnpoisonHeapReference(out.W());
2528  __ Cbz(out, slow_path->GetEntryLabel());
2529  __ Bind(slow_path->GetExitLabel());
2530}
2531
2532void LocationsBuilderARM64::VisitLocal(HLocal* local) {
2533  local->SetLocations(nullptr);
2534}
2535
2536void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
2537  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2538}
2539
2540void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
2541  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2542  locations->SetOut(Location::ConstantLocation(constant));
2543}
2544
2545void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
2546  // Will be generated at use site.
2547  UNUSED(constant);
2548}
2549
2550void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2551  LocationSummary* locations =
2552      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2553  InvokeRuntimeCallingConvention calling_convention;
2554  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
2555}
2556
2557void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
2558  codegen_->InvokeRuntime(instruction->IsEnter()
2559        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
2560      instruction,
2561      instruction->GetDexPc(),
2562      nullptr);
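      // pUnlockObject has the same signature as the pLockObject entrypoint
      // checked below.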
2563  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2564}
2565
2566void LocationsBuilderARM64::VisitMul(HMul* mul) {
2567  LocationSummary* locations =
2568      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2569  switch (mul->GetResultType()) {
2570    case Primitive::kPrimInt:
2571    case Primitive::kPrimLong:
2572      locations->SetInAt(0, Location::RequiresRegister());
2573      locations->SetInAt(1, Location::RequiresRegister());
2574      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2575      break;
2576
2577    case Primitive::kPrimFloat:
2578    case Primitive::kPrimDouble:
2579      locations->SetInAt(0, Location::RequiresFpuRegister());
2580      locations->SetInAt(1, Location::RequiresFpuRegister());
2581      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2582      break;
2583
2584    default:
2585      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2586  }
2587}
2588
2589void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
2590  switch (mul->GetResultType()) {
2591    case Primitive::kPrimInt:
2592    case Primitive::kPrimLong:
2593      __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
2594      break;
2595
2596    case Primitive::kPrimFloat:
2597    case Primitive::kPrimDouble:
2598      __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
2599      break;
2600
2601    default:
2602      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2603  }
2604}
2605
2606void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
2607  LocationSummary* locations =
2608      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2609  switch (neg->GetResultType()) {
2610    case Primitive::kPrimInt:
2611    case Primitive::kPrimLong:
2612      locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
2613      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2614      break;
2615
2616    case Primitive::kPrimFloat:
2617    case Primitive::kPrimDouble:
2618      locations->SetInAt(0, Location::RequiresFpuRegister());
2619      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2620      break;
2621
2622    default:
2623      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2624  }
2625}
2626
2627void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
2628  switch (neg->GetResultType()) {
2629    case Primitive::kPrimInt:
2630    case Primitive::kPrimLong:
2631      __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
2632      break;
2633
2634    case Primitive::kPrimFloat:
2635    case Primitive::kPrimDouble:
2636      __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
2637      break;
2638
2639    default:
2640      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2641  }
2642}
2643
2644void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
2645  LocationSummary* locations =
2646      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2647  InvokeRuntimeCallingConvention calling_convention;
2648  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2649  locations->SetOut(LocationFrom(x0));
2650  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2651  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
2652  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
2653                       void*, uint32_t, int32_t, ArtMethod*>();
2654}
2655
2656void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
2657  LocationSummary* locations = instruction->GetLocations();
2658  InvokeRuntimeCallingConvention calling_convention;
2659  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2660  DCHECK(type_index.Is(w0));
2661  __ Mov(type_index, instruction->GetTypeIndex());
2662  // Note: if heap poisoning is enabled, the entry point takes care
2663  // of poisoning the reference.
2664  codegen_->InvokeRuntime(
2665      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2666      instruction,
2667      instruction->GetDexPc(),
2668      nullptr);
2669  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
2670}
2671
2672void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
2673  LocationSummary* locations =
2674      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2675  InvokeRuntimeCallingConvention calling_convention;
2676  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
2677  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
2678  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2679  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2680}
2681
2682void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
2683  LocationSummary* locations = instruction->GetLocations();
2684  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
2685  DCHECK(type_index.Is(w0));
2686  __ Mov(type_index, instruction->GetTypeIndex());
2687  // Note: if heap poisoning is enabled, the entry point takes care
2688  // of poisoning the reference.
  codegen_->InvokeRuntime(
      GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
      instruction,
      instruction->GetDexPc(),
      nullptr);
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}

void LocationsBuilderARM64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
  switch (instruction->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
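      // Mvn is an alias of ORN with the zero register and emits a bitwise
      // NOT of the operand.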
      __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
      break;

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
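  // Booleans are materialized as 0 or 1, so XOR-ing with 1 negates the
  // value; e.g. with input in w1 and output in w0: `eor w0, w1, #0x1`.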
  __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
}

void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }

  BlockPoolsScope block_pools(GetVIXLAssembler());
  Location obj = instruction->GetLocations()->InAt(0);
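  // A load from the object at offset zero into the zero register faults if
  // the object is null; the fault handler then raises the
  // NullPointerException using the PC info recorded below.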
  __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

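  // Branch to the slow path if the object register is zero, i.e. null.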
  __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
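  // Stack-passed parameters live in the caller's frame, so rebase their
  // slot index past this method's own frame.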
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorARM64::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(LocationFrom(kArtMethodRegister));
}

void InstructionCodeGeneratorARM64::VisitCurrentMethod(
    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
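  // Phis are resolved into parallel moves at block boundaries, so no code
  // should ever be requested for one.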
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));

      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GenerateDivRemIntegral(rem);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
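      // ARM64 has no floating-point remainder instruction, so call the
      // fmodf/fmod runtime entry points.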
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type return_type = instruction->InputAt(0)->GetType();
  locations->SetInAt(0, ARM64ReturnLocation(return_type));
}

void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
  instruction->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
  UNUSED(instruction);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}

void LocationsBuilderARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction);
}

void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}

void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);
  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  if (Primitive::IsFloatingPointType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (Primitive::IsFloatingPointType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    int result_size = Primitive::ComponentSize(result_type);
    int input_size = Primitive::ComponentSize(input_type);
    int min_size = std::min(result_size, input_size);
    Register output = OutputRegister(conversion);
    Register source = InputRegisterAt(conversion, 0);
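    // Integral conversions are bitfield extracts: Ubfx zero-extends (char is
    // the only unsigned primitive type) and Sbfx sign-extends; e.g. a
    // long-to-byte conversion emits roughly `sbfx w0, w1, #0, #8`.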
    if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
      __ Ubfx(output, source, 0, result_size * kBitsPerByte);
    } else if ((result_type == Primitive::kPrimChar) ||
               ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
      __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    } else {
      __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
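    // Scvtf converts a signed integer to floating point.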
    __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
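    // Fcvtzs rounds toward zero and saturates, with NaN converting to 0,
    // which matches Java's float-to-integral semantics.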
    __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
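    // Fcvt converts between single and double precision.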
    __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace arm64
}  // namespace art