code_generator_arm.cc revision 23a8e35481face09183a24b9d11e505597c75ebb
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "compiled_method.h"
23#include "entrypoints/quick/quick_entrypoints.h"
24#include "gc/accounting/card_table.h"
25#include "intrinsics.h"
26#include "intrinsics_arm.h"
27#include "mirror/array-inl.h"
28#include "mirror/class-inl.h"
29#include "thread.h"
30#include "utils/arm/assembler_arm.h"
31#include "utils/arm/managed_register_arm.h"
32#include "utils/assembler.h"
33#include "utils/stack_checks.h"
34
35namespace art {
36
37namespace arm {
38
39static bool ExpectedPairLayout(Location location) {
40  // We expected this for both core and fpu register pairs.
41  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
42}
43
// Offset of the ArtMethod* within the current frame (always at SP+0).
static constexpr int kCurrentMethodStackOffset = 0;
// Register carrying the ArtMethod* on entry, per the managed calling convention.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Callee-saved core registers, per the managed ARM calling convention.
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, LR };
// Callee-saved FP registers (the upper half of the S-register file).
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Shorthand for emitting instructions through the codegen's assembler
// (slow-path context; redefined below for member-function context).
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
// Entry point offset into the Thread object, as a 32-bit immediate.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
61
// Slow path for HNullCheck: throws NullPointerException via the runtime.
// Fatal — control never returns to the fast path.
class NullCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // Tail into the runtime; the exception unwinds from here.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
85
// Slow path for HDivZeroCheck: throws ArithmeticException via the runtime.
// Fatal — control never returns to the fast path.
class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
109
// Slow path for HSuspendCheck: calls pTestSuspend so the thread can honor a
// suspension request, then branches either back to the return label inside
// the compiled code or directly to `successor_`.
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // The runtime call clobbers registers; save and restore everything live.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ b(GetReturnLabel());
    } else {
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
150
// Slow path for HBoundsCheck: throws ArrayIndexOutOfBoundsException with the
// offending index and array length as arguments. Fatal.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();

    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
188
// Slow path shared by HLoadClass and HClinitCheck: resolves the type via the
// runtime and, when `do_clinit_` is set, also runs its static initializer.
// The resolved class comes back in R0 and is moved to the expected output.
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the type index as the single runtime argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
241
// Slow path for HLoadString: resolves the string via the runtime and moves
// the result (returned in R0) to the instruction's output location.
class LoadStringSlowPathARM : public SlowPathCodeARM {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // The output must not be a saved live register, otherwise the restore
    // below would clobber the result.
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the string index as the single runtime argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
271
// Slow path shared by HInstanceOf and HCheckCast. For instance-of it calls
// pInstanceofNonTrivial and stores the boolean result; for check-cast it
// calls pCheckCast, which throws on failure.
class TypeCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit TypeCheckSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // Check-cast keeps the object's class in a temp; instance-of reuses the
    // output register.
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        object_class,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
      // The boolean result comes back in R0.
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
    }

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
323
// Slow path for HDeoptimize: transfers execution to the interpreter via the
// runtime. Control does not return to the compiled code.
class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
345
346#undef __
347#define __ down_cast<ArmAssembler*>(GetAssembler())->
348
349inline Condition ARMSignedOrFPCondition(IfCondition cond) {
350  switch (cond) {
351    case kCondEQ: return EQ;
352    case kCondNE: return NE;
353    case kCondLT: return LT;
354    case kCondLE: return LE;
355    case kCondGT: return GT;
356    case kCondGE: return GE;
357  }
358  LOG(FATAL) << "Unreachable";
359  UNREACHABLE();
360}
361
362inline Condition ARMUnsignedCondition(IfCondition cond) {
363  switch (cond) {
364    case kCondEQ: return EQ;
365    case kCondNE: return NE;
366    case kCondLT: return LO;
367    case kCondLE: return LS;
368    case kCondGT: return HI;
369    case kCondGE: return HS;
370  }
371  LOG(FATAL) << "Unreachable";
372  UNREACHABLE();
373}
374
375void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
376  stream << Register(reg);
377}
378
379void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
380  stream << SRegister(reg);
381}
382
383size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
384  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
385  return kArmWordSize;
386}
387
388size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
389  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
390  return kArmWordSize;
391}
392
393size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
394  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
395  return kArmWordSize;
396}
397
398size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
399  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
400  return kArmWordSize;
401}
402
// Constructs the ARM code generator, wiring up the callee-save masks,
// location builder, instruction visitor, move resolver and patch tables.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options,
                                   OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features),
      method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
      call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
      relative_call_patches_(graph->GetArena()->Adapter()) {
  // Always save the LR register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(LR));
}
429
// Finalizes code generation: lets the assembler fix up branches and emit
// literal pools, then rewrites every recorded native PC (stack maps, block
// labels, disassembly intervals, relative call patches) to account for the
// position shifts that fixup introduced.
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust native pc offsets of block labels.
  for (HBasicBlock* block : *block_order_) {
    // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
    // FirstNonEmptyBlock() which could lead to adjusting a label more than once.
    DCHECK_LT(static_cast<size_t>(block->GetBlockId()), block_labels_.Size());
    Label* block_label = &block_labels_.GetRawStorage()[block->GetBlockId()];
    DCHECK_EQ(block_label->IsBound(), !block->IsSingleJump());
    if (block_label->IsBound()) {
      __ AdjustLabelPosition(block_label);
    }
  }
  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }
  // Adjust pc offsets for relative call patches.
  for (MethodPatchInfo<Label>& info : relative_call_patches_) {
    __ AdjustLabelPosition(&info.label);
  }

  CodeGenerator::Finalize(allocator);
}
472
// Baseline register allocation: picks a free register (or aligned pair) for
// `type`, marks it blocked, and keeps the core-pair bookkeeping consistent.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      // Longs need a core register pair; block both halves.
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index.
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
523
// Marks registers the allocator must never hand out: special-purpose core
// registers always, plus (for baseline) all callee-saves except the one
// reserved register for long operations.
void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
  // Don't allocate the dalvik style register pair passing.
  blocked_register_pairs_[R1_R2] = true;

  // Stack register, LR and PC are always reserved.
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[LR] = true;
  blocked_core_registers_[PC] = true;

  // Reserve thread register.
  blocked_core_registers_[TR] = true;

  // Reserve temp register.
  blocked_core_registers_[IP] = true;

  if (is_baseline) {
    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
    }

    // R5 stays available so baseline can form long register pairs.
    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
    }
  }

  // Propagate single-register blocking to the pair table.
  UpdateBlockedPairRegisters();
}
553
554void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
555  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
556    ArmManagedRegister current =
557        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
558    if (blocked_core_registers_[current.AsRegisterPairLow()]
559        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
560      blocked_register_pairs_[i] = true;
561    }
562  }
563}
564
// Visitor that emits ARM code for each HInstruction, sharing the codegen's
// assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
569
// Computes which callee-save registers the frame must spill, based on what
// the allocator actually used, then widens the FP mask to a contiguous range
// as required by vpush/vpop.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest spilled register.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
590
591static dwarf::Reg DWARFReg(Register reg) {
592  return dwarf::Reg::ArmCore(static_cast<int>(reg));
593}
594
595static dwarf::Reg DWARFReg(SRegister reg) {
596  return dwarf::Reg::ArmFp(static_cast<int>(reg));
597}
598
// Emits the method prologue: optional stack overflow probe, pushes of core
// and FP callee-saves (with matching CFI records), frame allocation, and the
// store of the ArtMethod* at SP+0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit overflow check: load from below the stack limit; a fault here
    // is turned into a StackOverflowError by the runtime's fault handler.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  __ PushList(core_spill_mask_);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), so one vpush covers it.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame and store the current method at SP+0.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
629
// Emits the method epilogue: frame deallocation, FP callee-save restore, and
// a core-register pop that loads the saved LR directly into PC to return.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  // Snapshot CFI state so it can be restored after this (possibly non-final)
  // return sequence.
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // Pop LR into PC to return.
  DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
  uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
  __ PopList(pop_mask);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
652
653void CodeGeneratorARM::Bind(HBasicBlock* block) {
654  __ Bind(GetLabelOf(block));
655}
656
// Returns the stack slot (single or double word) that holds the given local.
// Sub-word types never appear as locals here and are fatal.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      // 64-bit values occupy two consecutive stack slots.
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}
680
// Assigns the location for the next argument of the current call, following
// the managed ARM calling convention: core values in R1-R3 (longs in an
// aligned pair, skipping R1 if needed), floats/doubles in S registers with
// doubles aligned, overflow on the stack. Stack slots advance even for
// register arguments so stack offsets stay consistent.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      // Longs consume two GP slots and two stack slots.
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The pair must be consecutive registers (e.g. R2_R3).
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));

        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
      }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // When no single S register was freed by an earlier double's alignment,
      // continue after the highest double allocation.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at the next even S register at or after float_index_.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
756
757Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
758  switch (type) {
759    case Primitive::kPrimBoolean:
760    case Primitive::kPrimByte:
761    case Primitive::kPrimChar:
762    case Primitive::kPrimShort:
763    case Primitive::kPrimInt:
764    case Primitive::kPrimNot: {
765      return Location::RegisterLocation(R0);
766    }
767
768    case Primitive::kPrimFloat: {
769      return Location::FpuRegisterLocation(S0);
770    }
771
772    case Primitive::kPrimLong: {
773      return Location::RegisterPairLocation(R0, R1);
774    }
775
776    case Primitive::kPrimDouble: {
777      return Location::FpuRegisterPairLocation(S0, S1);
778    }
779
780    case Primitive::kPrimVoid:
781      return Location();
782  }
783
784  UNREACHABLE();
785}
786
787Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
788  return Location::RegisterLocation(kMethodRegisterArgument);
789}
790
// Moves a 32-bit value between any combination of core register, S register
// and stack slot. Stack-to-stack moves go through the IP scratch register.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      // Stack-to-stack: bounce through the IP scratch register.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
824
// Emits code moving a 64-bit value between register pairs, FP register pairs,
// and double stack slots. Not all combinations are supported (see the
// UNIMPLEMENTED cases below).
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    // Nothing to do.
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Pair-to-pair move: use the parallel move resolver so overlapping
      // pairs are handled without clobbering.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // LDRD requires an even-numbered low register with high == low + 1.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // The R1/R2 pair cannot use STRD (odd low register); store each
        // word separately.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack: move the two words via the parallel move resolver.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
882
// Moves the value produced by `instruction` into `location`, on behalf of
// `move_for`. Handles the current method, constants, local loads, temporaries,
// and plain instruction outputs; dispatches to Move32/Move64 by type width.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current ArtMethod* is spilled at a fixed stack offset.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // Already in the right place.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Materialize the constant directly into the destination.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Load into IP, then spill to the stack slot.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Spill the two halves through IP, one word at a time.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Load from the local's stack slot; width depends on the type.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    // Temporaries live in dedicated stack slots.
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: move from the instruction's output location. This is only
    // valid right after the instruction (or its trailing temporaries).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
966
// Loads a 32-bit immediate into the core register denoted by `location`.
void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadImmediate(location.AsRegister<Register>(), value);
}
971
// Registers `location` as temp(s) in `locations`: one temp for a single core
// register, two temps (low then high) for a register pair. Other location
// kinds are not supported.
void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else if (location.IsRegisterPair()) {
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}
982
// Saves an FP value from `source` into core temp register(s) of `locations`:
// one temp for a float, two consecutive temps (low, high) for a double.
// Only floating-point types are supported.
void CodeGeneratorARM::MoveLocationToTemp(Location source,
                                          const LocationSummary& locations,
                                          int temp_index,
                                          Primitive::Type type) {
  if (!Primitive::IsFloatingPointType(type)) {
    UNIMPLEMENTED(FATAL) << "MoveLocationToTemp not implemented for type " << type;
  }

  if (type == Primitive::kPrimFloat) {
    DCHECK(source.IsFpuRegister()) << source;
    // S register -> core temp.
    __ vmovrs(locations.GetTemp(temp_index).AsRegister<Register>(),
              source.AsFpuRegister<SRegister>());
  } else {
    DCHECK_EQ(type, Primitive::kPrimDouble) << type;
    DCHECK(source.IsFpuRegisterPair()) << source;
    // D register (built from the S pair) -> two core temps.
    __ vmovrrd(locations.GetTemp(temp_index).AsRegister<Register>(),
               locations.GetTemp(temp_index + 1).AsRegister<Register>(),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
  }
}
1003
1004void CodeGeneratorARM::MoveTempToLocation(const LocationSummary& locations,
1005                                          int temp_index,
1006                                          Location destination,
1007                                          Primitive::Type type) {
1008  if (!Primitive::IsFloatingPointType(type)) {
1009    UNIMPLEMENTED(FATAL) << "MoveLocationToTemp not implemented for type " << type;
1010  }
1011
1012  if (type == Primitive::kPrimFloat) {
1013    DCHECK(destination.IsFpuRegister()) << destination;
1014    __ vmovsr(destination.AsFpuRegister<SRegister>(),
1015              locations.GetTemp(temp_index).AsRegister<Register>());
1016  } else {
1017    DCHECK(type == Primitive::kPrimDouble);
1018    DCHECK(destination.IsFpuRegisterPair()) << destination;
1019    __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
1020               locations.GetTemp(temp_index).AsRegister<Register>(),
1021               locations.GetTemp(temp_index + 1).AsRegister<Register>());
1022  }
1023}
1024
// Convenience overload: resolves the entrypoint enum to its Thread offset and
// delegates to the offset-based InvokeRuntime below.
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}
1034
// Calls a runtime entrypoint: loads its address from the Thread register (TR)
// at the given offset into LR, branches-and-links, then records the PC for
// stack maps / slow-path bookkeeping.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
}
1044
// Emits an unconditional transfer to `successor`, inserting a suspend check
// when crossing a loop back edge (or leaving the entry block after a suspend
// check), and eliding the branch when `successor` is the next block.
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Loop back edge: the suspend check slow path handles the branch itself.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    // A fall-through needs no explicit branch.
    __ b(codegen_->GetLabelOf(successor));
  }
}
1065
// HGoto uses no registers.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

// Code generation for HGoto is shared with HTryBoundary via HandleGoto.
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}
1073
// HTryBoundary uses no registers.
void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

// Emits the normal-flow branch out of a try boundary; a branch to the exit
// block would be dead code, so it is skipped.
void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}
1084
// HExit uses no registers and emits no code.
void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}
1092
1093void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
1094  ShifterOperand operand;
1095  if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
1096    __ cmp(left, operand);
1097  } else {
1098    Register temp = IP;
1099    __ LoadImmediate(temp, right);
1100    __ cmp(left, ShifterOperand(temp));
1101  }
1102}
1103
// Emits the branches following an FP compare (vcmps/vcmpd): the unordered
// (NaN) case is routed first via the V flag, then the ordered condition
// branches to `true_label`. Falls through on the false case.
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
                                                  Label* true_label,
                                                  Label* false_label) {
  __ vmstat();  // transfer FP status register to ARM APSR.
  if (cond->IsFPConditionTrueIfNaN()) {
    __ b(true_label, VS);  // VS for unordered.
  } else if (cond->IsFPConditionFalseIfNaN()) {
    __ b(false_label, VS);  // VS for unordered.
  }
  __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
}
1115
// Emits a 64-bit compare-and-branch as two 32-bit compares: first the high
// words (signed semantics), then — only when the highs are equal — the low
// words (unsigned semantics). Branches to `true_label` when the condition
// holds, to `false_label` otherwise (may fall through to the final branch).
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
                                                               Label* true_label,
                                                               Label* false_label) {
  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  IfCondition if_cond = cond->GetCondition();

  Register left_high = left.AsRegisterPairHigh<Register>();
  Register left_low = left.AsRegisterPairLow<Register>();
  IfCondition true_high_cond = if_cond;
  IfCondition false_high_cond = cond->GetOppositeCondition();
  // The low-word comparison compares unsigned magnitudes.
  Condition final_condition = ARMUnsignedCondition(if_cond);

  // Set the conditions for the test, remembering that == needs to be
  // decided using the low words.
  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      // Nothing to do.
      break;
    case kCondLT:
      false_high_cond = kCondGT;
      break;
    case kCondLE:
      true_high_cond = kCondLT;
      break;
    case kCondGT:
      false_high_cond = kCondLT;
      break;
    case kCondGE:
      true_high_cond = kCondGT;
      break;
  }
  if (right.IsConstant()) {
    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
    int32_t val_low = Low32Bits(value);
    int32_t val_high = High32Bits(value);

    GenerateCompareWithImmediate(left_high, val_high);
    if (if_cond == kCondNE) {
      // Any high-word difference already decides NE.
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      // Any high-word difference already decides !EQ.
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    } else {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    }
    // Must be equal high, so compare the lows.
    GenerateCompareWithImmediate(left_low, val_low);
  } else {
    Register right_high = right.AsRegisterPairHigh<Register>();
    Register right_low = right.AsRegisterPairLow<Register>();

    __ cmp(left_high, ShifterOperand(right_high));
    if (if_cond == kCondNE) {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    } else {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    }
    // Must be equal high, so compare the lows.
    __ cmp(left_low, ShifterOperand(right_low));
  }
  // The last comparison might be unsigned.
  __ b(true_label, final_condition);
}
1185
// Emits the compare and branches for a non-materialized long or FP condition
// feeding an HIf. `true_target`/`false_target` may be null when the
// corresponding successor is the fall-through block; `always_true_target`
// substitutes for a null true_target.
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
                                                               HCondition* condition,
                                                               Label* true_target,
                                                               Label* false_target,
                                                               Label* always_true_target) {
  LocationSummary* locations = condition->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  // We don't want true_target as a nullptr.
  if (true_target == nullptr) {
    true_target = always_true_target;
  }
  // Remember whether the false case was a fall-through before we substitute
  // a real label below.
  bool falls_through = (false_target == nullptr);

  // FP compares don't like null false_targets.
  if (false_target == nullptr) {
    false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  }

  Primitive::Type type = condition->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(condition, true_target, false_target);
      break;
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }

  if (!falls_through) {
    // Close the false path with an explicit branch.
    __ b(false_target);
  }
}
1228
// Emits the test and branches for a branching instruction (HIf or
// HDeoptimize) whose input 0 is its condition. Handles three shapes:
// a constant condition (statically resolved), a materialized condition
// (compare output to zero), and a non-materialized condition (re-emit the
// compare from its inputs). Null labels mean "fall through".
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ CompareAndBranchIfNonZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
                                   true_target);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      Primitive::Type type =
          cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
      // Is this a long or FP comparison that has been folded into the HCondition?
      if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
        // Generate the comparison directly.
        GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
                                     true_target, false_target, always_true_target);
        return;
      }

      // 32-bit integral compare: register vs register or immediate.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      Location right = locations->InAt(1);
      if (right.IsRegister()) {
        __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    // Explicit branch for the false path when it is not the next block.
    __ b(false_target);
  }
}
1281
1282void LocationsBuilderARM::VisitIf(HIf* if_instr) {
1283  LocationSummary* locations =
1284      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
1285  HInstruction* cond = if_instr->InputAt(0);
1286  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1287    locations->SetInAt(0, Location::RequiresRegister());
1288  }
1289}
1290
1291void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1292  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1293  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1294  Label* always_true_target = true_target;
1295  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1296                                if_instr->IfTrueSuccessor())) {
1297    always_true_target = nullptr;
1298  }
1299  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1300                                if_instr->IfFalseSuccessor())) {
1301    false_target = nullptr;
1302  }
1303  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1304}
1305
// HDeoptimize branches to a slow path; it needs a register input only when
// its condition is materialized.
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}
1315
1316void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1317  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1318      DeoptimizationSlowPathARM(deoptimize);
1319  codegen_->AddSlowPath(slow_path);
1320  Label* slow_path_entry = slow_path->GetEntryLabel();
1321  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1322}
1323
// Allocates locations for an HCondition. Long and FP comparisons (which may
// have been folded here by instruction simplification) get their own rules;
// the output register is only needed when the condition is materialized.
void LocationsBuilderARM::VisitCondition(HCondition* cond) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
  // Handle the long/FP comparisons made in instruction simplification.
  switch (cond->InputAt(0)->GetType()) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
      if (cond->NeedsMaterialization()) {
        // The long path emits branches before writing the output, so the
        // output must not alias an input.
        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      }
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
      break;

    default:
      // 32-bit integral comparisons.
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
  }
}
1354
// Materializes an HCondition into its output register (0 or 1). Integer
// conditions use a compare plus an IT block; long and FP conditions reuse
// the branching helpers and then convert the jumps into a value.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  if (!cond->NeedsMaterialization()) {
    // The consumer (e.g. HIf) folds the condition into its own code.
    return;
  }

  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  Register out = locations->Out().AsRegister<Register>();
  Label true_label, false_label;

  switch (cond->InputAt(0)->GetType()) {
    default: {
      // Integer case.
      if (right.IsRegister()) {
        __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left.AsRegister<Register>(),
                                     CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      // Branch-free materialization: an IT block selects 1 or 0.
      __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
             ARMSignedOrFPCondition(cond->GetCondition()));
      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
             ARMSignedOrFPCondition(cond->GetOppositeCondition()));
      return;
    }
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(cond, &true_label, &false_label);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(cond, &true_label, &false_label);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(cond, &true_label, &false_label);
      break;
  }

  // Convert the jumps into the result.
  Label done_label;

  // False case: result = 0.
  __ Bind(&false_label);
  __ LoadImmediate(out, 0);
  __ b(&done_label);

  // True case: result = 1.
  __ Bind(&true_label);
  __ LoadImmediate(out, 1);
  __ Bind(&done_label);
}
1410
// All concrete comparison instructions share HCondition's location rules and
// code generation; these visitors simply forward to VisitCondition.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1458
// HLocal uses no registers and emits no code; locals only exist in the
// entry block.
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

// HLoadLocal produces no code here; the load is emitted at the use site by
// CodeGeneratorARM::Move.
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1475
// HStoreLocal pins its value input (input 1) to the local's stack slot so the
// register allocator performs the store; the slot width depends on the type.
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}
1499
// No code needed: the store happens through the input's fixed stack location.
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}
1503
// Constants of every type get a constant location and emit no code of their
// own; they are materialized at each use site.
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1558
// HMemoryBarrier uses no registers; it just emits the fence matching its kind.
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1566
// HReturnVoid uses no registers and just tears down the frame.
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

// HReturn pins its input to the calling convention's return location, so the
// register allocator places the value; the visitor only exits the frame.
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}

void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1586
void LocationsBuilderARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.  Argument locations can therefore be set up by the
  // generic invoke handling.
  HandleInvoke(invoke);
}
1593
void InstructionCodeGeneratorARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // Resolution failed at compile time; emit a call into the runtime
  // trampoline that resolves and invokes the method.
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}
1597
1598void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1599  // When we do not run baseline, explicit clinit checks triggered by static
1600  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1601  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1602
1603  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1604                                         codegen_->GetInstructionSetFeatures());
1605  if (intrinsic.TryDispatch(invoke)) {
1606    return;
1607  }
1608
1609  HandleInvoke(invoke);
1610}
1611
1612static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1613  if (invoke->GetLocations()->Intrinsified()) {
1614    IntrinsicCodeGeneratorARM intrinsic(codegen);
1615    intrinsic.Dispatch(invoke);
1616    return true;
1617  }
1618  return false;
1619}
1620
1621void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1622  // When we do not run baseline, explicit clinit checks triggered by static
1623  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1624  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1625
1626  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1627    return;
1628  }
1629
1630  LocationSummary* locations = invoke->GetLocations();
1631  codegen_->GenerateStaticOrDirectCall(
1632      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
1633  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1634}
1635
1636void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
1637  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
1638  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
1639}
1640
1641void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1642  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1643                                         codegen_->GetInstructionSetFeatures());
1644  if (intrinsic.TryDispatch(invoke)) {
1645    return;
1646  }
1647
1648  HandleInvoke(invoke);
1649}
1650
1651void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1652  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1653    return;
1654  }
1655
1656  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
1657  DCHECK(!codegen_->IsLeafMethod());
1658  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1659}
1660
1661void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
1662  HandleInvoke(invoke);
1663  // Add the hidden argument.
1664  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
1665}
1666
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // Dispatches an interface call through the receiver class's embedded IMT:
  // load the receiver's class, index into its IMT, and branch-and-link to the
  // target's quick entry point.
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument: temp(1) was reserved as R12 by the locations
  // builder and carries the interface method's dex method index.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the implicit null check of the receiver,
  // so the PC must be recorded immediately after it.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  __ MaybeUnpoisonHeapReference(temp);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1700
1701void LocationsBuilderARM::VisitNeg(HNeg* neg) {
1702  LocationSummary* locations =
1703      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
1704  switch (neg->GetResultType()) {
1705    case Primitive::kPrimInt: {
1706      locations->SetInAt(0, Location::RequiresRegister());
1707      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1708      break;
1709    }
1710    case Primitive::kPrimLong: {
1711      locations->SetInAt(0, Location::RequiresRegister());
1712      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1713      break;
1714    }
1715
1716    case Primitive::kPrimFloat:
1717    case Primitive::kPrimDouble:
1718      locations->SetInAt(0, Location::RequiresFpuRegister());
1719      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1720      break;
1721
1722    default:
1723      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
1724  }
1725}
1726
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  // Emits the arithmetic negation of the single input, per result type.
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      // (SBC x, x, x computes x - x - NOT(C), i.e. 0 or -1 depending on the
      // borrow produced by the RSBS above.)
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1773
void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
  // Sets up operand/result locations for every supported primitive type
  // conversion; must stay in sync with the code generator's visitor below.
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);

  // The float-to-long, double-to-long and long-to-float type conversions
  // rely on a call to the runtime.
  LocationSummary::CallKind call_kind =
      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
        && result_type == Primitive::kPrimLong)
       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  // The Java language does not allow treating boolean as an integral type but
  // our bit representation makes it safe.

  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Any: the code generator handles a register pair, a double stack
          // slot, or a long constant as input.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-int' instruction.
          // The FP temp holds the truncated value before it is moved to core.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-int' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-long' instruction.
          // Runtime call: the operand goes in the FP argument register and
          // the result comes back in R0/R1.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterLocation(
              calling_convention.GetFpuRegisterAt(0)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-long' instruction.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterPairLocation(
              calling_convention.GetFpuRegisterAt(0),
              calling_convention.GetFpuRegisterAt(1)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-float' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          // Runtime call: long operand in core argument registers, float
          // result in the FP argument register.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::RegisterPairLocation(
              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-double' instruction.
          // Two FP temps are used by the multiply-accumulate sequence that
          // combines the converted high and low words (see the code generator).
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1986
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  // Emits the code for a primitive type conversion; the dispatch mirrors the
  // location setup in LocationsBuilderARM::VisitTypeConversion.
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend the low 8 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend the low 16 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Only the low word of the long is kept; the input may live in a
          // register pair, a double stack slot, or be a constant.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FP scratch register, then move the result to core.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // Delegated to the runtime; operands/results were placed per the
          // runtime calling convention by the locations builder.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend the low 16 bits (char is unsigned).
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // result = (double) high * 2^32 + (unsigned double) low, computed
          // with a fused multiply-accumulate to combine the two halves.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
          DRegister constant_d = FromLowSToD(constant_s);

          // temp_d = int-to-double(high)
          __ vmovsr(temp_s, high);
          __ vcvtdi(temp_d, temp_s);
          // constant_d = k2Pow32EncodingForDouble
          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
          // out_d = unsigned-to-double(low)
          __ vmovsr(out_s, low);
          __ vcvtdu(out_d, out_s);
          // out_d += temp_d * constant_d
          __ vmlad(out_d, temp_d, constant_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
2218
2219void LocationsBuilderARM::VisitAdd(HAdd* add) {
2220  LocationSummary* locations =
2221      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
2222  switch (add->GetResultType()) {
2223    case Primitive::kPrimInt: {
2224      locations->SetInAt(0, Location::RequiresRegister());
2225      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
2226      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2227      break;
2228    }
2229
2230    case Primitive::kPrimLong: {
2231      locations->SetInAt(0, Location::RequiresRegister());
2232      locations->SetInAt(1, Location::RequiresRegister());
2233      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2234      break;
2235    }
2236
2237    case Primitive::kPrimFloat:
2238    case Primitive::kPrimDouble: {
2239      locations->SetInAt(0, Location::RequiresFpuRegister());
2240      locations->SetInAt(1, Location::RequiresFpuRegister());
2241      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2242      break;
2243    }
2244
2245    default:
2246      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2247  }
2248}
2249
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
  // Emits the addition of the two inputs, per result type.
  LocationSummary* locations = add->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt:
      if (second.IsRegister()) {
        __ add(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Constant operand: AddConstant picks a suitable encoding.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // Add the low words (setting the carry flag), then the high words with
      // carry; the two instructions must stay adjacent.
      __ adds(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ adc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat:
      __ vadds(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}
2295
2296void LocationsBuilderARM::VisitSub(HSub* sub) {
2297  LocationSummary* locations =
2298      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2299  switch (sub->GetResultType()) {
2300    case Primitive::kPrimInt: {
2301      locations->SetInAt(0, Location::RequiresRegister());
2302      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2303      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2304      break;
2305    }
2306
2307    case Primitive::kPrimLong: {
2308      locations->SetInAt(0, Location::RequiresRegister());
2309      locations->SetInAt(1, Location::RequiresRegister());
2310      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2311      break;
2312    }
2313    case Primitive::kPrimFloat:
2314    case Primitive::kPrimDouble: {
2315      locations->SetInAt(0, Location::RequiresFpuRegister());
2316      locations->SetInAt(1, Location::RequiresFpuRegister());
2317      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2318      break;
2319    }
2320    default:
2321      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2322  }
2323}
2324
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
  // Emits the subtraction of the second input from the first, per result type.
  LocationSummary* locations = sub->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ sub(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Constant operand: subtract by adding the negated immediate.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       -second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;
    }

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // Subtract the low words (setting the borrow flag), then the high words
      // with borrow; the two instructions must stay adjacent.
      __ subs(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ sbc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vsubs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }


    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}
2374
2375void LocationsBuilderARM::VisitMul(HMul* mul) {
2376  LocationSummary* locations =
2377      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2378  switch (mul->GetResultType()) {
2379    case Primitive::kPrimInt:
2380    case Primitive::kPrimLong:  {
2381      locations->SetInAt(0, Location::RequiresRegister());
2382      locations->SetInAt(1, Location::RequiresRegister());
2383      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2384      break;
2385    }
2386
2387    case Primitive::kPrimFloat:
2388    case Primitive::kPrimDouble: {
2389      locations->SetInAt(0, Location::RequiresFpuRegister());
2390      locations->SetInAt(1, Location::RequiresFpuRegister());
2391      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2392      break;
2393    }
2394
2395    default:
2396      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2397  }
2398}
2399
// Emits the multiply for the instruction's result type. The 64-bit case
// synthesizes a 64x64->64 multiply from 32-bit multiplies, so its instruction
// order and register constraints are significant.
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      // Plain 32x32->32 multiply.
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      // The mla below would clobber an input low half before it is consumed.
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      // IP receives the high half of the unsigned product.
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      // Single-precision FPU multiply.
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Double-precision FPU multiply; the D register is the low S register's pair.
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2461
2462void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2463  DCHECK(instruction->IsDiv() || instruction->IsRem());
2464  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2465
2466  LocationSummary* locations = instruction->GetLocations();
2467  Location second = locations->InAt(1);
2468  DCHECK(second.IsConstant());
2469
2470  Register out = locations->Out().AsRegister<Register>();
2471  Register dividend = locations->InAt(0).AsRegister<Register>();
2472  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2473  DCHECK(imm == 1 || imm == -1);
2474
2475  if (instruction->IsRem()) {
2476    __ LoadImmediate(out, 0);
2477  } else {
2478    if (imm == 1) {
2479      __ Mov(out, dividend);
2480    } else {
2481      __ rsb(out, dividend, ShifterOperand(0));
2482    }
2483  }
2484}
2485
// Emits code for an int div/rem whose constant divisor has power-of-two
// magnitude. Uses the standard round-toward-zero trick: bias a negative
// dividend by (2^ctz - 1) before the arithmetic shift.
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  // NOTE(review): std::abs(INT32_MIN) is undefined behavior — TODO confirm an
  // INT32_MIN divisor cannot reach this path (the locations builder uses the
  // same std::abs to decide temp allocation).
  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  // temp <- bias: (2^ctz_imm - 1) when the dividend is negative, 0 otherwise,
  // built from the dividend's replicated sign bit.
  if (ctz_imm == 1) {
    // Dividing by +/-2: the bias is just the sign bit itself.
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  // out <- dividend + bias.
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    // Quotient: arithmetic shift of the biased value, negated for a negative divisor.
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    // Remainder: keep the low ctz_imm bits of the biased value, then remove the bias.
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}
2520
// Emits code for an int div/rem with an arbitrary non-trivial constant divisor,
// using multiplication by a precomputed "magic" constant plus a shift
// (Hacker's Delight style division by constants).
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 <- high 32 bits of dividend * magic (temp2 gets the discarded low half).
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high half when the signs of the divisor and the magic constant differ.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // out <- temp1 - (temp1 >> 31): subtracting the sign (-1 when negative)
    // adds one, giving the round-toward-zero quotient.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    // temp1 <- quotient (same correction as above).
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // out <- dividend - quotient * divisor.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2561
2562void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2563  DCHECK(instruction->IsDiv() || instruction->IsRem());
2564  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2565
2566  LocationSummary* locations = instruction->GetLocations();
2567  Location second = locations->InAt(1);
2568  DCHECK(second.IsConstant());
2569
2570  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2571  if (imm == 0) {
2572    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2573  } else if (imm == 1 || imm == -1) {
2574    DivRemOneOrMinusOne(instruction);
2575  } else if (IsPowerOfTwo(std::abs(imm))) {
2576    DivRemByPowerOfTwo(instruction);
2577  } else {
2578    DCHECK(imm <= -2 || imm >= 2);
2579    GenerateDivRemWithAnyConstant(instruction);
2580  }
2581}
2582
2583void LocationsBuilderARM::VisitDiv(HDiv* div) {
2584  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2585  if (div->GetResultType() == Primitive::kPrimLong) {
2586    // pLdiv runtime call.
2587    call_kind = LocationSummary::kCall;
2588  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2589    // sdiv will be replaced by other instruction sequence.
2590  } else if (div->GetResultType() == Primitive::kPrimInt &&
2591             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2592    // pIdivmod runtime call.
2593    call_kind = LocationSummary::kCall;
2594  }
2595
2596  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2597
2598  switch (div->GetResultType()) {
2599    case Primitive::kPrimInt: {
2600      if (div->InputAt(1)->IsConstant()) {
2601        locations->SetInAt(0, Location::RequiresRegister());
2602        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2603        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2604        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2605        if (abs_imm <= 1) {
2606          // No temp register required.
2607        } else {
2608          locations->AddTemp(Location::RequiresRegister());
2609          if (!IsPowerOfTwo(abs_imm)) {
2610            locations->AddTemp(Location::RequiresRegister());
2611          }
2612        }
2613      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2614        locations->SetInAt(0, Location::RequiresRegister());
2615        locations->SetInAt(1, Location::RequiresRegister());
2616        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2617      } else {
2618        InvokeRuntimeCallingConvention calling_convention;
2619        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2620        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2621        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2622        //       we only need the former.
2623        locations->SetOut(Location::RegisterLocation(R0));
2624      }
2625      break;
2626    }
2627    case Primitive::kPrimLong: {
2628      InvokeRuntimeCallingConvention calling_convention;
2629      locations->SetInAt(0, Location::RegisterPairLocation(
2630          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2631      locations->SetInAt(1, Location::RegisterPairLocation(
2632          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2633      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2634      break;
2635    }
2636    case Primitive::kPrimFloat:
2637    case Primitive::kPrimDouble: {
2638      locations->SetInAt(0, Location::RequiresFpuRegister());
2639      locations->SetInAt(1, Location::RequiresFpuRegister());
2640      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2641      break;
2642    }
2643
2644    default:
2645      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2646  }
2647}
2648
2649void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
2650  LocationSummary* locations = div->GetLocations();
2651  Location out = locations->Out();
2652  Location first = locations->InAt(0);
2653  Location second = locations->InAt(1);
2654
2655  switch (div->GetResultType()) {
2656    case Primitive::kPrimInt: {
2657      if (second.IsConstant()) {
2658        GenerateDivRemConstantIntegral(div);
2659      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2660        __ sdiv(out.AsRegister<Register>(),
2661                first.AsRegister<Register>(),
2662                second.AsRegister<Register>());
2663      } else {
2664        InvokeRuntimeCallingConvention calling_convention;
2665        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2666        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2667        DCHECK_EQ(R0, out.AsRegister<Register>());
2668
2669        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
2670      }
2671      break;
2672    }
2673
2674    case Primitive::kPrimLong: {
2675      InvokeRuntimeCallingConvention calling_convention;
2676      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
2677      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
2678      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
2679      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
2680      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
2681      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());
2682
2683      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
2684      break;
2685    }
2686
2687    case Primitive::kPrimFloat: {
2688      __ vdivs(out.AsFpuRegister<SRegister>(),
2689               first.AsFpuRegister<SRegister>(),
2690               second.AsFpuRegister<SRegister>());
2691      break;
2692    }
2693
2694    case Primitive::kPrimDouble: {
2695      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2696               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2697               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2698      break;
2699    }
2700
2701    default:
2702      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2703  }
2704}
2705
2706void LocationsBuilderARM::VisitRem(HRem* rem) {
2707  Primitive::Type type = rem->GetResultType();
2708
2709  // Most remainders are implemented in the runtime.
2710  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2711  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2712    // sdiv will be replaced by other instruction sequence.
2713    call_kind = LocationSummary::kNoCall;
2714  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2715             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2716    // Have hardware divide instruction for int, do it with three instructions.
2717    call_kind = LocationSummary::kNoCall;
2718  }
2719
2720  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2721
2722  switch (type) {
2723    case Primitive::kPrimInt: {
2724      if (rem->InputAt(1)->IsConstant()) {
2725        locations->SetInAt(0, Location::RequiresRegister());
2726        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2727        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2728        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2729        if (abs_imm <= 1) {
2730          // No temp register required.
2731        } else {
2732          locations->AddTemp(Location::RequiresRegister());
2733          if (!IsPowerOfTwo(abs_imm)) {
2734            locations->AddTemp(Location::RequiresRegister());
2735          }
2736        }
2737      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2738        locations->SetInAt(0, Location::RequiresRegister());
2739        locations->SetInAt(1, Location::RequiresRegister());
2740        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2741        locations->AddTemp(Location::RequiresRegister());
2742      } else {
2743        InvokeRuntimeCallingConvention calling_convention;
2744        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2745        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2746        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2747        //       we only need the latter.
2748        locations->SetOut(Location::RegisterLocation(R1));
2749      }
2750      break;
2751    }
2752    case Primitive::kPrimLong: {
2753      InvokeRuntimeCallingConvention calling_convention;
2754      locations->SetInAt(0, Location::RegisterPairLocation(
2755          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2756      locations->SetInAt(1, Location::RegisterPairLocation(
2757          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2758      // The runtime helper puts the output in R2,R3.
2759      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2760      break;
2761    }
2762    case Primitive::kPrimFloat: {
2763      InvokeRuntimeCallingConvention calling_convention;
2764      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2765      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2766      locations->SetOut(Location::FpuRegisterLocation(S0));
2767      break;
2768    }
2769
2770    case Primitive::kPrimDouble: {
2771      InvokeRuntimeCallingConvention calling_convention;
2772      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2773          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2774      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2775          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2776      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2777      break;
2778    }
2779
2780    default:
2781      LOG(FATAL) << "Unexpected rem type " << type;
2782  }
2783}
2784
2785void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2786  LocationSummary* locations = rem->GetLocations();
2787  Location out = locations->Out();
2788  Location first = locations->InAt(0);
2789  Location second = locations->InAt(1);
2790
2791  Primitive::Type type = rem->GetResultType();
2792  switch (type) {
2793    case Primitive::kPrimInt: {
2794        if (second.IsConstant()) {
2795          GenerateDivRemConstantIntegral(rem);
2796        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2797        Register reg1 = first.AsRegister<Register>();
2798        Register reg2 = second.AsRegister<Register>();
2799        Register temp = locations->GetTemp(0).AsRegister<Register>();
2800
2801        // temp = reg1 / reg2  (integer division)
2802        // dest = reg1 - temp * reg2
2803        __ sdiv(temp, reg1, reg2);
2804        __ mls(out.AsRegister<Register>(), temp, reg2, reg1);
2805      } else {
2806        InvokeRuntimeCallingConvention calling_convention;
2807        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2808        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2809        DCHECK_EQ(R1, out.AsRegister<Register>());
2810
2811        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2812      }
2813      break;
2814    }
2815
2816    case Primitive::kPrimLong: {
2817      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2818      break;
2819    }
2820
2821    case Primitive::kPrimFloat: {
2822      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2823      break;
2824    }
2825
2826    case Primitive::kPrimDouble: {
2827      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2828      break;
2829    }
2830
2831    default:
2832      LOG(FATAL) << "Unexpected rem type " << type;
2833  }
2834}
2835
2836void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2837  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2838      ? LocationSummary::kCallOnSlowPath
2839      : LocationSummary::kNoCall;
2840  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2841  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2842  if (instruction->HasUses()) {
2843    locations->SetOut(Location::SameAsFirstInput());
2844  }
2845}
2846
2847void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2848  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2849  codegen_->AddSlowPath(slow_path);
2850
2851  LocationSummary* locations = instruction->GetLocations();
2852  Location value = locations->InAt(0);
2853
2854  switch (instruction->GetType()) {
2855    case Primitive::kPrimByte:
2856    case Primitive::kPrimChar:
2857    case Primitive::kPrimShort:
2858    case Primitive::kPrimInt: {
2859      if (value.IsRegister()) {
2860        __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
2861      } else {
2862        DCHECK(value.IsConstant()) << value;
2863        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2864          __ b(slow_path->GetEntryLabel());
2865        }
2866      }
2867      break;
2868    }
2869    case Primitive::kPrimLong: {
2870      if (value.IsRegisterPair()) {
2871        __ orrs(IP,
2872                value.AsRegisterPairLow<Register>(),
2873                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2874        __ b(slow_path->GetEntryLabel(), EQ);
2875      } else {
2876        DCHECK(value.IsConstant()) << value;
2877        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2878          __ b(slow_path->GetEntryLabel());
2879        }
2880      }
2881      break;
2882    default:
2883      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2884    }
2885  }
2886}
2887
2888void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2889  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2890
2891  LocationSummary* locations =
2892      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2893
2894  switch (op->GetResultType()) {
2895    case Primitive::kPrimInt: {
2896      locations->SetInAt(0, Location::RequiresRegister());
2897      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2898      // Make the output overlap, as it will be used to hold the masked
2899      // second input.
2900      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2901      break;
2902    }
2903    case Primitive::kPrimLong: {
2904      locations->SetInAt(0, Location::RequiresRegister());
2905      locations->SetInAt(1, Location::RequiresRegister());
2906      locations->AddTemp(Location::RequiresRegister());
2907      locations->SetOut(Location::RequiresRegister());
2908      break;
2909    }
2910    default:
2911      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2912  }
2913}
2914
// Emits shl/shr/ushr. The 64-bit sequences compose two 32-bit shifts and are
// flag- and order-sensitive (subs + it(PL) predication), so the exact
// instruction order below must be preserved.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // The masked amount is staged in out_reg, which is why the output was
        // marked as overlapping in the locations builder.
        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, out_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, out_reg);
        } else {
          __ Lsr(out_reg, first_reg, out_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      // The masked shift amount lives in one half of the output while the input
      // halves are still being read, hence the output is not kNoOutputOverlap.
      if (op->IsShl()) {
        __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the high part
        __ Lsl(o_h, high, o_l);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is >= 32 bits, override the high part (PL: shift - 32
        // is non-negative after the subs).
        __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsl(o_h, low, temp, PL);
        // Shift the low part
        __ Lsl(o_l, low, o_l);
      } else if (op->IsShr()) {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the low part
        __ Lsr(o_l, low, o_h);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is >= 32 bits, override the low part
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Asr(o_l, high, temp, PL);
        // Shift the high part
        __ Asr(o_h, high, o_h);
      } else {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ Lsr(o_l, low, o_h);
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsr(o_l, high, temp, PL);
        __ Lsr(o_h, high, o_h);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
3011
// All shift kinds share the common locations helper.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
3015
// All shift kinds share the common code-generation helper.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
3019
// All shift kinds share the common locations helper.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
3023
// All shift kinds share the common code-generation helper.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
3027
// All shift kinds share the common locations helper.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
3031
// All shift kinds share the common code-generation helper.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
3035
// Object allocation is a runtime call: pin input and temp to the runtime
// calling-convention registers and the result to R0.
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  // The first convention register is a temp: the code generator loads the
  // type index into it before the call.
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}
3044
// Loads the type index into the first convention register and calls the
// allocation entry point selected for this instruction.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  // Note: if heap poisoning is enabled, the entry point takes cares
  // of poisoning the reference.
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
3055
// Array allocation is a runtime call: pin inputs and temp to the runtime
// calling-convention registers and the result to R0.
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  // The first convention register is a temp: the code generator loads the
  // type index into it before the call.
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(Location::RegisterLocation(R0));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}
3065
// Loads the type index into the first convention register and calls the
// allocation entry point selected for this instruction.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  // Note: if heap poisoning is enabled, the entry point takes cares
  // of poisoning the reference.
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
3076
3077void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
3078  LocationSummary* locations =
3079      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3080  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
3081  if (location.IsStackSlot()) {
3082    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3083  } else if (location.IsDoubleStackSlot()) {
3084    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3085  }
3086  locations->SetOut(location);
3087}
3088
// No code is emitted for parameters: the locations builder already assigned
// each one its register or (rebased) stack slot.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
3093
// The current ArtMethod* is always in the fixed method register
// (kMethodRegisterArgument, i.e. R0).
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
3099
// No code needed: the method register was fixed by the locations builder.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
3103
3104void LocationsBuilderARM::VisitNot(HNot* not_) {
3105  LocationSummary* locations =
3106      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
3107  locations->SetInAt(0, Location::RequiresRegister());
3108  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3109}
3110
// Emits the bitwise complement of the input (int or long).
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      // Single MVN (move-negated) for 32-bit values.
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      // 64-bit complement: negate each half of the register pair separately.
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
3131
3132void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
3133  LocationSummary* locations =
3134      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
3135  locations->SetInAt(0, Location::RequiresRegister());
3136  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3137}
3138
3139void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
3140  LocationSummary* locations = bool_not->GetLocations();
3141  Location out = locations->Out();
3142  Location in = locations->InAt(0);
3143  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
3144}
3145
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      // FP inputs, but the -1/0/1 result always goes to a core register.
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}
3168
// Materializes the three-way comparison result (-1, 0, or 1) in `out`.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // Compare high words first (signed); only when they are equal does the
      // unsigned low-word comparison below decide the result.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      // Preload 0 (flags untouched afterwards by vcmp/vmstat paths below).
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // NaN (unordered): gt-bias compares report "greater", lt-bias "less".
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail; branches on the flags set by the last compare above.
  __ b(&done, EQ);
  __ b(&less, LO);  // LO is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
3217
3218void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
3219  LocationSummary* locations =
3220      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3221  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
3222    locations->SetInAt(i, Location::Any());
3223  }
3224  locations->SetOut(Location::Any());
3225}
3226
// Phis carry no code of their own; reaching this visitor is a compiler bug.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3231
// Emits a DMB with the option matching the requested barrier kind.
void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
  // TODO (ported from quick): revisit Arm barrier kinds
  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
  switch (kind) {
    case MemBarrierKind::kAnyStore:
    case MemBarrierKind::kLoadAny:
    case MemBarrierKind::kAnyAny: {
      // Full inner-shareable barrier for everything except store-store.
      flavor = DmbOptions::ISH;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      // Store-store only needs the cheaper ISHST variant.
      flavor = DmbOptions::ISHST;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ dmb(flavor);
}
3251
// Atomically loads a 64-bit value from [addr + offset] into out_lo/out_hi
// using LDREXD (single-copy atomic for doubleword reads).
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    // out_lo is free to use as a scratch here: LDREXD overwrites it below.
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
3263
// Atomically stores value_lo/value_hi to [addr + offset] via an
// LDREXD/STREXD retry loop; temp1/temp2 are clobbered.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  // The LDREXD above is the first access that can fault on a null object.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // temp1 receives STREXD's success flag (0 = stored, 1 = lost exclusivity).
  __ strexd(temp1, value_lo, value_hi, addr);
  __ CompareAndBranchIfNonZero(temp1, &fail);
}
3285
// Sets up locations for instance/static field stores; shared by both visitors.
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  // 64-bit volatile stores need the ldrexd/strexd loop when the CPU lacks
  // atomic ldrd/strd, which in turn needs extra temporaries.
  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (needs_write_barrier) {
    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3328
// Emits an instance/static field store, including volatile barriers, wide
// atomic stores, heap-reference poisoning and the GC write barrier.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));

  // Volatile store: barrier before the store (release ordering half).
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (kPoisonHeapReferences && needs_write_barrier) {
        // Note that in the case where `value` is a null reference,
        // we do not enter this block, as a null reference does not
        // need poisoning.
        DCHECK_EQ(field_type, Primitive::kPrimNot);
        // Poison a copy, not `value` itself: the unpoisoned value is still
        // needed by the write barrier below.
        Register temp = locations->GetTemp(0).AsRegister<Register>();
        __ Mov(temp, value.AsRegister<Register>());
        __ PoisonHeapReference(temp);
        __ StoreToOffset(kStoreWord, temp, base, offset);
      } else {
        __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single-copy-atomic strd available: use the ldrexd/strexd loop.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Move the FP value into core registers for the exclusive store loop.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  // Volatile store: barrier after the store (ordering against later accesses).
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3441
// Sets up locations for instance/static field loads; shared by both visitors.
void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  // A volatile double load without atomic ldrd needs the ldrexd path plus
  // two core temporaries to receive the value before moving it to VFP.
  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Volatile long loads write the output before the load completes, so the
  // output must not share registers with the input.
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}
3470
// Emits an instance/static field load, including wide atomic loads, the
// trailing volatile barrier and heap-reference unpoisoning.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single-copy-atomic ldrd available: use ldrexd.
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into core temporaries, then move to the VFP pair.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  // Volatile load: barrier after the load (acquire ordering).
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }

  if (field_type == Primitive::kPrimNot) {
    __ MaybeUnpoisonHeapReference(out.AsRegister<Register>());
  }
}
3559
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Delegates to the field-set handler shared with static fields.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3563
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Delegates to the shared handler; passes nullability for the write barrier.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3567
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Delegates to the field-get handler shared with static fields.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3571
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Delegates to the field-get handler shared with static fields.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3575
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Delegates to the field-get handler shared with instance fields.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3579
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Delegates to the field-get handler shared with instance fields.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3583
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Delegates to the field-set handler shared with instance fields.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3587
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Delegates to the shared handler; passes nullability for the write barrier.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3591
void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  // Unresolved fields go through a runtime call, so inputs/outputs are
  // pinned to the calling convention.
  // NOTE(review): "Convetion" [sic] matches the class name as declared.
  FieldAccessCallingConvetionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3598
void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  // Emits the runtime call that resolves and reads the field.
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc());
}
3606
void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  // Unresolved fields go through a runtime call, so inputs are pinned to
  // the calling convention.
  FieldAccessCallingConvetionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3613
void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  // Emits the runtime call that resolves and writes the field.
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc());
}
3621
void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  // Unresolved fields go through a runtime call, so inputs/outputs are
  // pinned to the calling convention.
  FieldAccessCallingConvetionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3628
void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  // Emits the runtime call that resolves and reads the static field.
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc());
}
3636
void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  // Unresolved fields go through a runtime call, so inputs are pinned to
  // the calling convention.
  FieldAccessCallingConvetionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3643
void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  // Emits the runtime call that resolves and writes the static field.
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc());
}
3651
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  // A slow path is only needed when the exception must be caught in-method.
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    // The checked reference passes through unchanged.
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3662
// Implicit null check: a dummy load from the object faults on null, and the
// recorded PC lets the signal handler raise the NPE.
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    // A following memory access will perform the check instead.
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  // Load into IP (scratch); the value is discarded, only the fault matters.
  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3672
// Explicit null check: compare the reference against zero and branch to a
// slow path that throws the NPE.
void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ CompareAndBranchIfZero(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
}
3682
3683void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3684  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
3685    GenerateImplicitNullCheck(instruction);
3686  } else {
3687    GenerateExplicitNullCheck(instruction);
3688  }
3689}
3690
3691void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3692  LocationSummary* locations =
3693      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3694  locations->SetInAt(0, Location::RequiresRegister());
3695  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3696  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3697    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3698  } else {
3699    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3700  }
3701}
3702
// Emits an array element load. For constant indices the element offset is
// folded into the load; otherwise the scaled index is added into IP first.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        // Byte elements: no index scaling needed.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        // Scale the index by the 2-byte element size via LSL.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
                    "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  // The load above is the first access that can fault on a null array.
  codegen_->MaybeRecordImplicitNullCheck(instruction);

  if (type == Primitive::kPrimNot) {
    Register out = locations->Out().AsRegister<Register>();
    __ MaybeUnpoisonHeapReference(out);
  }
}
3836
void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();

  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
  // Stores that need a type check are routed through the runtime.
  bool needs_runtime_call = instruction->NeedsTypeCheck();

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    // Runtime call: pin array, index and value to the argument registers.
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(value_type)) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }

    if (needs_write_barrier) {
      // Temporary registers for the write barrier.
      locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
      locations->AddTemp(Location::RequiresRegister());
    }
  }
}
3867
// Emits the code for an array element store.  For a constant index the
// element is addressed as obj + data_offset + (index << element_shift); for a
// register index the base obj + (index << element_shift) is first computed
// into the scratch register IP.  Reference stores either run inline (with
// optional heap-reference poisoning and a GC card mark) or, when a runtime
// type check is needed, call the pAputObject entrypoint.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    // 1-byte elements.
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    // 2-byte elements.
    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    // 4-byte core elements: plain ints and object references.
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        Register source = value;
        if (kPoisonHeapReferences && needs_write_barrier) {
          // Note that in the case where `value` is a null reference,
          // we do not enter this block, as a null reference does not
          // need poisoning.
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          // Poison a copy in a temp so `value` itself stays usable for the
          // card mark below.
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          __ Mov(temp, value);
          __ PoisonHeapReference(temp);
          source = temp;
        }
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, source, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, source, IP, data_offset);
        }
        // Record the null check here (right after the store) rather than in
        // the shared code after the switch; see the comment there.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        // Note: if heap poisoning is enabled, pAputObject takes cares
        // of poisoning the reference.
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    // 8-byte core elements, stored as a register pair.
    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    // Single-precision FP elements.
    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    // Double-precision FP elements, stored from an S-register pair viewed
    // as a D-register.
    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
4005
4006void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
4007  LocationSummary* locations =
4008      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4009  locations->SetInAt(0, Location::RequiresRegister());
4010  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4011}
4012
4013void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
4014  LocationSummary* locations = instruction->GetLocations();
4015  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
4016  Register obj = locations->InAt(0).AsRegister<Register>();
4017  Register out = locations->Out().AsRegister<Register>();
4018  __ LoadFromOffset(kLoadWord, out, obj, offset);
4019  codegen_->MaybeRecordImplicitNullCheck(instruction);
4020}
4021
4022void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
4023  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
4024      ? LocationSummary::kCallOnSlowPath
4025      : LocationSummary::kNoCall;
4026  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4027  locations->SetInAt(0, Location::RequiresRegister());
4028  locations->SetInAt(1, Location::RequiresRegister());
4029  if (instruction->HasUses()) {
4030    locations->SetOut(Location::SameAsFirstInput());
4031  }
4032}
4033
4034void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
4035  LocationSummary* locations = instruction->GetLocations();
4036  SlowPathCodeARM* slow_path =
4037      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
4038  codegen_->AddSlowPath(slow_path);
4039
4040  Register index = locations->InAt(0).AsRegister<Register>();
4041  Register length = locations->InAt(1).AsRegister<Register>();
4042
4043  __ cmp(index, ShifterOperand(length));
4044  __ b(slow_path->GetEntryLabel(), HS);
4045}
4046
// Marks the GC card covering `object`, telling the garbage collector that
// `object` may now contain a cross-space reference.  `temp` and `card` are
// scratch registers; `value` is the reference that was just stored.  When
// `can_be_null` is true, storing a null reference skips the marking entirely.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  Label is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // Load the biased card table base from thread-local storage.
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  // Index of the card covering `object`.
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store the low byte of `card` at card_base + card_index; by the card
  // table's biasing convention this byte is the dirty-card value (see
  // gc/accounting/card_table.h).
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
4063
// Temporaries carry no locations of their own; they are materialized by the
// instructions that use them.
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}
4067
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}
4072
// Parallel moves are created by the register allocator after location
// building, so the builder must never see one.
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4077
// Delegates emission of the move sequence to the parallel-move resolver,
// which orders the moves so sources are not clobbered before they are read.
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
4081
// A suspend check has no operands; it only needs a slow path for when the
// thread has a pending suspension request.
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
4085
// Emits a suspend check unless another instruction already covers it: loop
// headers are checked on their back edges, and the entry block's check is
// folded into its trailing goto.
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}
4099
// Emits the actual suspend-flag test.  With a null `successor` the check is
// inline in straight-line code and the slow path returns right after it;
// with a non-null `successor` (a loop header) the check sits on a back edge:
// no pending request branches to the header, otherwise we fall into the slow
// path.  The slow path object is cached on the instruction so a back edge
// and its header share one.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Load the thread's flags halfword; non-zero means a checkpoint or
  // suspension has been requested.
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  if (successor == nullptr) {
    __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ CompareAndBranchIfZero(IP, codegen_->GetLabelOf(successor));
    __ b(slow_path->GetEntryLabel());
  }
}
4126
// The move resolver emits through the code generator's assembler.
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  return codegen_->GetAssembler();
}
4130
// Emits one move of the resolved parallel-move sequence.  Handles every
// source/destination combination of core registers, FP registers, register
// pairs, (double) stack slots and constants, using IP (core) and DTMP (FP)
// as scratch for memory-to-memory transfers.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    // Core register -> core register or stack slot.
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    // 32-bit stack slot -> core register, S register, or another slot
    // (via IP).
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    // S register -> S register or stack slot.
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    // 64-bit stack slot -> another double slot (via DTMP), a core register
    // pair, or an FP pair.
    if (destination.IsDoubleStackSlot()) {
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    // Core register pair -> core pair (two moves) or double stack slot.
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    // FP pair (D register) -> D register or double stack slot.
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant -> register(s) or stack slot(s), materialized as immediates.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Store the two halves through IP, one word at a time.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Reinterpret the double's bits and store them as two words.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
4245
// Swaps the contents of core register `reg` with the stack slot at
// SP + `mem`, using IP as scratch.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
4251
// Swaps two 32-bit stack slots.  Needs two scratch registers: IP plus one
// allocated by ScratchRegisterScope; if the latter had to be spilled, the
// push moved SP, so both slot offsets are adjusted by one word.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
4262
// Emits one swap of the resolved parallel-move sequence (used to break move
// cycles).  Scratch usage: IP for 32-bit core/FP swaps, DTMP for 64-bit
// swaps (a core register pair is shuttled through DTMP via vmovdrr/vmovrrd).
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core <-> core, three moves through IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // S <-> S through IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core pair <-> core pair: park the source pair in DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core pair <-> double stack slot, through DTMP.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // D <-> D through DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // D <-> double stack slot, through DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // S <-> 32-bit stack slot, through IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double slot <-> double slot: swap the two word halves independently.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
4335
// Frees a core register for use as scratch by pushing it onto the stack.
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
4339
// Restores a register previously spilled by SpillScratch.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
4343
4344void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
4345  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
4346      ? LocationSummary::kCallOnSlowPath
4347      : LocationSummary::kNoCall;
4348  LocationSummary* locations =
4349      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
4350  locations->SetInAt(0, Location::RequiresRegister());
4351  locations->SetOut(Location::RequiresRegister());
4352}
4353
// Materializes a class reference.  The referrer's own class is read straight
// from the ArtMethod; other classes are fetched from the dex cache, with a
// slow path to resolve (and optionally initialize) the class when the cache
// entry is still null.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index].
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.

    // Null cache entry means the class is unresolved: resolve on the slow
    // path, which also runs <clinit> checks when required.
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
4383
4384void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4385  LocationSummary* locations =
4386      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4387  locations->SetInAt(0, Location::RequiresRegister());
4388  if (check->HasUses()) {
4389    locations->SetOut(Location::SameAsFirstInput());
4390  }
4391}
4392
// Emits an explicit class-initialization check with a slow path that runs
// the class initializer if needed.
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}
4401
// Compares the class status against kStatusInitialized and takes the slow
// path (signed less-than) when the class is not yet initialized.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4412
4413void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4414  LocationSummary* locations =
4415      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4416  locations->SetInAt(0, Location::RequiresRegister());
4417  locations->SetOut(Location::RequiresRegister());
4418}
4419
// Loads a string reference out of the declaring class's dex cache:
// current_method->declaring_class->dex_cache_strings[string_index].  A null
// cache entry means the string is unresolved, handled on the slow path.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
  __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}
4435
// Offset of the pending-exception slot in the thread-local storage block.
static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
}
4439
4440void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4441  LocationSummary* locations =
4442      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4443  locations->SetOut(Location::RequiresRegister());
4444}
4445
// Loads the thread's pending exception object from TLS.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
}
4450
// Clearing the pending exception needs no locations at all.
void LocationsBuilderARM::VisitClearException(HClearException* clear) {
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}
4454
// Writes null into the thread's pending-exception TLS slot.
void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
}
4459
4460void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4461  LocationSummary* locations =
4462      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4463  InvokeRuntimeCallingConvention calling_convention;
4464  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4465}
4466
// Delegates exception delivery to the pDeliverException runtime entrypoint.
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4471
4472void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4473  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4474      ? LocationSummary::kNoCall
4475      : LocationSummary::kCallOnSlowPath;
4476  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4477  locations->SetInAt(0, Location::RequiresRegister());
4478  locations->SetInAt(1, Location::RequiresRegister());
4479  // The out register is used as a temporary, so it overlaps with the inputs.
4480  // Note that TypeCheckSlowPathARM uses this register too.
4481  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4482}
4483
// Emits instanceof: null yields 0; a class-pointer match yields 1.  For
// final classes a mismatch yields 0 directly; otherwise a mismatch falls to
// a slow path that performs the full subtype check and writes `out` itself.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Label done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ MaybeUnpoisonHeapReference(out);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // The `zero` label is only targeted (and thus only needs binding) when at
  // least one of the branches above may jump to it.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4527
4528void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4529  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4530      instruction, LocationSummary::kCallOnSlowPath);
4531  locations->SetInAt(0, Location::RequiresRegister());
4532  locations->SetInAt(1, Location::RequiresRegister());
4533  // Note that TypeCheckSlowPathARM uses this register too.
4534  locations->AddTemp(Location::RequiresRegister());
4535}
4536
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  // Emits the check that `obj` (if non-null) is an instance of `cls`;
  // on a class-pointer mismatch, TypeCheckSlowPathARM performs the full
  // subtype check and throws on failure.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path =
      new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
  codegen_->AddSlowPath(slow_path);

  // A null reference always passes the cast: jump straight past the check.
  // Avoid the null check if we statically know `obj` is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel());
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ MaybeUnpoisonHeapReference(temp);
  __ cmp(temp, ShifterOperand(cls));
  // The checkcast succeeds if the classes are equal (fast path).
  // Otherwise, we need to go into the slow path to check the types.
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
}
4561
4562void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4563  LocationSummary* locations =
4564      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4565  InvokeRuntimeCallingConvention calling_convention;
4566  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4567}
4568
4569void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4570  codegen_->InvokeRuntime(instruction->IsEnter()
4571        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4572      instruction,
4573      instruction->GetDexPc(),
4574      nullptr);
4575}
4576
4577void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
4578void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
4579void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4580
4581void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4582  LocationSummary* locations =
4583      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4584  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4585         || instruction->GetResultType() == Primitive::kPrimLong);
4586  locations->SetInAt(0, Location::RequiresRegister());
4587  locations->SetInAt(1, Location::RequiresRegister());
4588  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4589}
4590
4591void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
4592  HandleBitwiseOperation(instruction);
4593}
4594
4595void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
4596  HandleBitwiseOperation(instruction);
4597}
4598
4599void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
4600  HandleBitwiseOperation(instruction);
4601}
4602
4603void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4604  LocationSummary* locations = instruction->GetLocations();
4605
4606  if (instruction->GetResultType() == Primitive::kPrimInt) {
4607    Register first = locations->InAt(0).AsRegister<Register>();
4608    Register second = locations->InAt(1).AsRegister<Register>();
4609    Register out = locations->Out().AsRegister<Register>();
4610    if (instruction->IsAnd()) {
4611      __ and_(out, first, ShifterOperand(second));
4612    } else if (instruction->IsOr()) {
4613      __ orr(out, first, ShifterOperand(second));
4614    } else {
4615      DCHECK(instruction->IsXor());
4616      __ eor(out, first, ShifterOperand(second));
4617    }
4618  } else {
4619    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
4620    Location first = locations->InAt(0);
4621    Location second = locations->InAt(1);
4622    Location out = locations->Out();
4623    if (instruction->IsAnd()) {
4624      __ and_(out.AsRegisterPairLow<Register>(),
4625              first.AsRegisterPairLow<Register>(),
4626              ShifterOperand(second.AsRegisterPairLow<Register>()));
4627      __ and_(out.AsRegisterPairHigh<Register>(),
4628              first.AsRegisterPairHigh<Register>(),
4629              ShifterOperand(second.AsRegisterPairHigh<Register>()));
4630    } else if (instruction->IsOr()) {
4631      __ orr(out.AsRegisterPairLow<Register>(),
4632             first.AsRegisterPairLow<Register>(),
4633             ShifterOperand(second.AsRegisterPairLow<Register>()));
4634      __ orr(out.AsRegisterPairHigh<Register>(),
4635             first.AsRegisterPairHigh<Register>(),
4636             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4637    } else {
4638      DCHECK(instruction->IsXor());
4639      __ eor(out.AsRegisterPairLow<Register>(),
4640             first.AsRegisterPairLow<Register>(),
4641             ShifterOperand(second.AsRegisterPairLow<Register>()));
4642      __ eor(out.AsRegisterPairHigh<Register>(),
4643             first.AsRegisterPairHigh<Register>(),
4644             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4645    }
4646  }
4647}
4648
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // Emits the call sequence for a static or direct invoke. Two independent
  // choices drive the emission: how the callee ArtMethod* is obtained
  // (method load kind) and how its code is reached (code pointer location).
  // For better instruction scheduling we load the direct code pointer before the method pointer.
  bool direct_code_loaded = false;
  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // Same dex file: a PC-relative BL is expected to stay in range, so
      // nothing needs to be preloaded here.
      if (IsSameDexFile(*invoke->GetTargetMethod().dex_file, GetGraph()->GetDexFile())) {
        break;
      }
      // Calls across dex files are more likely to exceed the available BL range,
      // so use absolute patch by falling through to kDirectCodeFixup.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
      // LR = code address from literal pool with link-time patch.
      __ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
      direct_code_loaded = true;
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // LR = invoke->GetDirectCodePtr();
      __ LoadImmediate(LR, invoke->GetDirectCodePtr());
      direct_code_loaded = true;
      break;
    default:
      break;
  }

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      // Recursive call: reuse the current method already held in an input.
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      // The ArtMethod* address is known at compile time.
      __ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
      // Load the ArtMethod* from a literal patched at link time.
      __ LoadLiteral(temp.AsRegister<Register>(),
                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      Register method_reg;
      Register reg = temp.AsRegister<Register>();
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<Register>();
      } else {
        // Intrinsified invokes carry no current-method input; reload it
        // from its fixed stack slot instead.
        DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
      }
      // temp = current_method->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset(
              kArmPointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      // Self-recursive call: branch directly to this method's frame entry.
      __ bl(GetFrameEntryLabel());
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      if (!direct_code_loaded) {
        // Record the BL's position so the linker can rewrite its target.
        relative_call_patches_.emplace_back(invoke->GetTargetMethod());
        __ Bind(&relative_call_patches_.back().label);
        Label label;
        __ bl(&label);  // Arbitrarily branch to the instruction after BL, override at link time.
        __ Bind(&label);
        break;
      }
      // If we loaded the direct code above, fall through.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // LR prepared above for better instruction scheduling.
      DCHECK(direct_code_loaded);
      // LR(): call through the code pointer in LR.
      __ blx(LR);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // LR = callee_method->entry_point_from_quick_compiled_code_
      __ LoadFromOffset(
          kLoadWord, LR, callee_method.AsRegister<Register>(),
          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
      // LR(): call through the entry point in LR.
      __ blx(LR);
      break;
  }

  DCHECK(!IsLeafMethod());
}
4750
void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
  // Emits a virtual dispatch: load the receiver's class, fetch the target
  // ArtMethod* from the class's embedded vtable, then call its quick
  // compiled-code entry point.
  Register temp = temp_location.AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  // Recorded immediately after the class load: that load is the instruction
  // that faults if the receiver is null.
  MaybeRecordImplicitNullCheck(invoke);
  __ MaybeUnpoisonHeapReference(temp);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
}
4772
4773void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
4774  DCHECK(linker_patches->empty());
4775  size_t size = method_patches_.size() + call_patches_.size() + relative_call_patches_.size();
4776  linker_patches->reserve(size);
4777  for (const auto& entry : method_patches_) {
4778    const MethodReference& target_method = entry.first;
4779    Literal* literal = entry.second;
4780    DCHECK(literal->GetLabel()->IsBound());
4781    uint32_t literal_offset = literal->GetLabel()->Position();
4782    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
4783                                                       target_method.dex_file,
4784                                                       target_method.dex_method_index));
4785  }
4786  for (const auto& entry : call_patches_) {
4787    const MethodReference& target_method = entry.first;
4788    Literal* literal = entry.second;
4789    DCHECK(literal->GetLabel()->IsBound());
4790    uint32_t literal_offset = literal->GetLabel()->Position();
4791    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
4792                                                     target_method.dex_file,
4793                                                     target_method.dex_method_index));
4794  }
4795  for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
4796    uint32_t literal_offset = info.label.Position();
4797    linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
4798                                                             info.target_method.dex_file,
4799                                                             info.target_method.dex_method_index));
4800  }
4801}
4802
4803Literal* CodeGeneratorARM::DeduplicateMethodLiteral(MethodReference target_method,
4804                                                    MethodToLiteralMap* map) {
4805  // Look up the literal for target_method.
4806  auto lb = map->lower_bound(target_method);
4807  if (lb != map->end() && !map->key_comp()(target_method, lb->first)) {
4808    return lb->second;
4809  }
4810  // We don't have a literal for this method yet, insert a new one.
4811  Literal* literal = __ NewLiteral<uint32_t>(0u);
4812  map->PutBefore(lb, target_method, literal);
4813  return literal;
4814}
4815
4816Literal* CodeGeneratorARM::DeduplicateMethodAddressLiteral(MethodReference target_method) {
4817  return DeduplicateMethodLiteral(target_method, &method_patches_);
4818}
4819
4820Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_method) {
4821  return DeduplicateMethodLiteral(target_method, &call_patches_);
4822}
4823
4824void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
4825  // Nothing to do, this should be removed during prepare for register allocator.
4826  UNUSED(instruction);
4827  LOG(FATAL) << "Unreachable";
4828}
4829
4830void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
4831  // Nothing to do, this should be removed during prepare for register allocator.
4832  UNUSED(instruction);
4833  LOG(FATAL) << "Unreachable";
4834}
4835
4836void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
4837  DCHECK(codegen_->IsBaseline());
4838  LocationSummary* locations =
4839      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4840  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
4841}
4842
4843void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
4844  DCHECK(codegen_->IsBaseline());
4845  // Will be generated at use site.
4846}
4847
4848#undef __
4849#undef QUICK_ENTRY_POINT
4850
4851}  // namespace arm
4852}  // namespace art
4853