// code_generator_arm.cc revision 38207af82afb6f99c687f64b15601ed20d82220a
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "gc/accounting/card_table.h"
24#include "intrinsics.h"
25#include "intrinsics_arm.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// Stack offset at which the current ArtMethod* is stored in the frame.
static constexpr int kCurrentMethodStackOffset = 0;
// Register carrying the ArtMethod* on entry, per the managed calling convention.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Core callee-saves. PC is listed to mimic Quick's frame layout; LR is pushed
// in its place on entry (see GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Emit through the slow path's `codegen` parameter (redefined later for the
// codegen member functions themselves).
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
// Thread-relative offset of a quick runtime entrypoint, as a 32-bit value.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
60
// Slow path throwing a NullPointerException through the pThrowNullPointer
// runtime entrypoint.
class NullCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // The entrypoint throws: no registers are saved/restored and no exit
    // branch is emitted, since control does not come back here.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
76
// Slow path throwing an ArithmeticException (division by zero) through the
// pThrowDivZero runtime entrypoint.
class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // The entrypoint throws: no save/restore or exit branch is needed.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
92
// Slow path calling the pTestSuspend runtime entrypoint for an HSuspendCheck.
// Live registers are preserved around the call; afterwards control branches
// either back to the point of the check or to an explicit successor block.
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // The runtime call may suspend the thread; keep the live registers intact.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      // No explicit successor: resume right after the suspend check.
      __ b(GetReturnLabel());
    } else {
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  // Only meaningful when there is no explicit successor block.
  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
131
// Slow path throwing an ArrayIndexOutOfBoundsException through
// pThrowArrayBounds, passing the offending index and the array length as the
// two runtime arguments.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
                         Location index_location,
                         Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        length_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    // The entrypoint throws: no save/restore or exit branch is needed.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HBoundsCheck* const instruction_;
  // Location holding the out-of-range index.
  const Location index_location_;
  // Location holding the array length the index was checked against.
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
165
// Slow path resolving a class through pInitializeType, or resolving and
// initializing it through pInitializeStaticStorage when `do_clinit` is set.
// The runtime returns the class in R0.
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the type index as the single runtime argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      // The output must survive the register restore below.
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
216
// Slow path resolving a string through the pResolveString runtime entrypoint.
// The runtime returns the string in R0, which is then moved to the
// instruction's output location.
class LoadStringSlowPathARM : public SlowPathCodeARM {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // The output register must not be clobbered by the save/restore below.
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the string index as the single runtime argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
244
245class TypeCheckSlowPathARM : public SlowPathCodeARM {
246 public:
247  TypeCheckSlowPathARM(HInstruction* instruction,
248                       Location class_to_check,
249                       Location object_class,
250                       uint32_t dex_pc)
251      : instruction_(instruction),
252        class_to_check_(class_to_check),
253        object_class_(object_class),
254        dex_pc_(dex_pc) {}
255
256  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
257    LocationSummary* locations = instruction_->GetLocations();
258    DCHECK(instruction_->IsCheckCast()
259           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
260
261    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
262    __ Bind(GetEntryLabel());
263    SaveLiveRegisters(codegen, locations);
264
265    // We're moving two locations to locations that could overlap, so we need a parallel
266    // move resolver.
267    InvokeRuntimeCallingConvention calling_convention;
268    codegen->EmitParallelMoves(
269        class_to_check_,
270        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
271        Primitive::kPrimNot,
272        object_class_,
273        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
274        Primitive::kPrimNot);
275
276    if (instruction_->IsInstanceOf()) {
277      arm_codegen->InvokeRuntime(
278          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
279      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
280    } else {
281      DCHECK(instruction_->IsCheckCast());
282      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
283    }
284
285    RestoreLiveRegisters(codegen, locations);
286    __ b(GetExitLabel());
287  }
288
289 private:
290  HInstruction* const instruction_;
291  const Location class_to_check_;
292  const Location object_class_;
293  uint32_t dex_pc_;
294
295  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
296};
297
// Slow path transferring execution to the interpreter through the pDeoptimize
// runtime entrypoint. Note that, unlike the other slow paths, nothing is
// restored and no exit branch is emitted after the call: execution does not
// resume in this compiled code.
class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    // Save the live registers so the runtime can reconstruct the dex state.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
317
// Switch the `__` shorthand from the slow paths' `codegen` parameter to the
// codegen member functions' own assembler. (A duplicated `#undef __` was
// removed here.)
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
322
323inline Condition ARMCondition(IfCondition cond) {
324  switch (cond) {
325    case kCondEQ: return EQ;
326    case kCondNE: return NE;
327    case kCondLT: return LT;
328    case kCondLE: return LE;
329    case kCondGT: return GT;
330    case kCondGE: return GE;
331    default:
332      LOG(FATAL) << "Unknown if condition";
333  }
334  return EQ;        // Unreachable.
335}
336
337inline Condition ARMOppositeCondition(IfCondition cond) {
338  switch (cond) {
339    case kCondEQ: return NE;
340    case kCondNE: return EQ;
341    case kCondLT: return GE;
342    case kCondLE: return GT;
343    case kCondGT: return LE;
344    case kCondGE: return LT;
345    default:
346      LOG(FATAL) << "Unknown if condition";
347  }
348  return EQ;        // Unreachable.
349}
350
351void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
352  stream << Register(reg);
353}
354
355void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
356  stream << SRegister(reg);
357}
358
// Spills core register `reg_id` to the stack slot at `stack_index`; returns
// the number of bytes consumed.
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
363
// Reloads core register `reg_id` from the stack slot at `stack_index`;
// returns the number of bytes consumed.
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
368
// Spills S register `reg_id` to the stack slot at `stack_index`; returns the
// number of bytes consumed.
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
373
// Reloads S register `reg_id` from the stack slot at `stack_index`; returns
// the number of bytes consumed.
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
378
// Constructs the ARM code generator, wiring the register counts and the
// callee-save masks (computed from kCoreCalleeSaves / kFpuCalleeSaves) into
// the base CodeGenerator.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(false /* can_relocate_branches */),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
400
// Picks a free register (or register pair) for a value of `type` and marks it
// blocked so later allocations do not reuse it. Used by the baseline pass.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      // Block both halves, then refresh the pair bookkeeping so other pairs
      // sharing these registers become unavailable too.
      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // A double needs two consecutive, even-aligned S registers (i.e. one D
      // register).
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
451
452void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
453  // Don't allocate the dalvik style register pair passing.
454  blocked_register_pairs_[R1_R2] = true;
455
456  // Stack register, LR and PC are always reserved.
457  blocked_core_registers_[SP] = true;
458  blocked_core_registers_[LR] = true;
459  blocked_core_registers_[PC] = true;
460
461  // Reserve thread register.
462  blocked_core_registers_[TR] = true;
463
464  // Reserve temp register.
465  blocked_core_registers_[IP] = true;
466
467  if (is_baseline) {
468    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
469      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
470    }
471
472    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
473
474    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
475      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
476    }
477  }
478
479  UpdateBlockedPairRegisters();
480}
481
482void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
483  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
484    ArmManagedRegister current =
485        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
486    if (blocked_core_registers_[current.AsRegisterPairLow()]
487        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
488      blocked_register_pairs_[i] = true;
489    }
490  }
491}
492
// Visitor that emits ARM code for each HInstruction into `codegen`'s
// assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
497
// Computes core_spill_mask_ and fpu_spill_mask_ from the registers the
// allocator actually used, intersected with the callee-save sets.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest set bits so the mask is a
    // single contiguous run, as required by vpush/vpop.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
518
// Maps an ARM core register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
522
// Maps an ARM S register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
526
// Emits the method prologue: optional stack-overflow probe, callee-save
// pushes (with CFI bookkeeping), frame allocation and storing the ArtMethod*
// at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  // Only the implicit (fault-based) check scheme is supported here.
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    // Nothing to spill, no frame to set up.
    return;
  }

  if (!skip_overflow_check) {
    // Implicit stack overflow check: touch the lowest address this method may
    // use; record the PC so the fault can be attributed to this method.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is a contiguous run (see ComputeSpillMask), so vpush can
    // start at its lowest register and cover the whole mask.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame beyond the spill area.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  // Store the current method at the bottom of the frame (SP + 0).
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
560
// Emits the method epilogue: deallocates the frame and pops the callee-saves.
// core_spill_mask_ contains PC (see kCoreCalleeSaves), so the final PopList
// also performs the return.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  // CFI: snapshot the state so code emitted after this exit keeps the method
  // body's frame description.
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is a contiguous run (see ComputeSpillMask).
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
580
// Binds `block`'s label to the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
584
585Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
586  switch (load->GetType()) {
587    case Primitive::kPrimLong:
588    case Primitive::kPrimDouble:
589      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
590
591    case Primitive::kPrimInt:
592    case Primitive::kPrimNot:
593    case Primitive::kPrimFloat:
594      return Location::StackSlot(GetStackSlot(load->GetLocal()));
595
596    case Primitive::kPrimBoolean:
597    case Primitive::kPrimByte:
598    case Primitive::kPrimChar:
599    case Primitive::kPrimShort:
600    case Primitive::kPrimVoid:
601      LOG(FATAL) << "Unexpected type " << load->GetType();
602      UNREACHABLE();
603  }
604
605  LOG(FATAL) << "Unreachable";
606  UNREACHABLE();
607}
608
// Returns the location of the next argument of `type` per the managed ARM
// calling convention, updating the visitor's running register/stack indices.
// Note: the stack index advances for every argument, even those passed in
// registers.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // 32-bit values take the next core register, else a stack slot.
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs take a register pair (or a double stack slot).
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The convention must provide consecutive registers for the pair.
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      if (float_index_ % 2 == 0) {
        // At an even position, catch up with the double index so a float is
        // not placed inside a register pair already handed to a double.
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at an even FPU register index past any singles used.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
683
684Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
685  switch (type) {
686    case Primitive::kPrimBoolean:
687    case Primitive::kPrimByte:
688    case Primitive::kPrimChar:
689    case Primitive::kPrimShort:
690    case Primitive::kPrimInt:
691    case Primitive::kPrimNot: {
692      return Location::RegisterLocation(R0);
693    }
694
695    case Primitive::kPrimFloat: {
696      return Location::FpuRegisterLocation(S0);
697    }
698
699    case Primitive::kPrimLong: {
700      return Location::RegisterPairLocation(R0, R1);
701    }
702
703    case Primitive::kPrimDouble: {
704      return Location::FpuRegisterPairLocation(S0, S1);
705    }
706
707    case Primitive::kPrimVoid:
708      return Location();
709  }
710
711  UNREACHABLE();
712}
713
// The ArtMethod* is always passed in R0 (kMethodRegisterArgument).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
717
// Emits a 32-bit move between any combination of core register, S register
// and stack slot. No-op when source equals destination.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      // Stack-to-stack moves go through the scratch register IP.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
751
// Emits a 64-bit move between core register pairs, FPU register pairs and
// double stack slots. No-op when source equals destination.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // The source and destination pairs may share registers, so let the
      // parallel move resolver order the two half-moves safely.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // ldrd needs an even/odd consecutive destination pair.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1_R2 is not an strd-compatible pair; store the words one at a time.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack: move the two words via the parallel move resolver.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
809
// Moves the value produced by `instruction` into `location`, for use by
// `move_for`. Handles the current-method slot, constants materialized at
// their use site, dex locals, temporaries, and plain instruction outputs.
// `move_for` is only used to validate instruction ordering in debug builds.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current ArtMethod* is spilled at a fixed offset from SP.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // The value is already in the requested location; nothing to do.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Constants are materialized directly into the destination.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Stage the immediate in the scratch register IP before storing.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store the 64-bit constant one 32-bit half at a time, via IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Loads of dex locals copy from the local's home stack slot(s).
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        // 32-bit types occupy one slot.
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        // 64-bit types occupy two consecutive slots.
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: move from the instruction's output location. The
    // instruction must immediately precede its user (or a temporary).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
893
// Emits a call to a Quick runtime entry point: loads the entry point address
// from the current Thread (TR) at `entry_point_offset`, branches to it, and
// records the call-site PC against `dex_pc` (and `slow_path`, if any).
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only implicit-check instructions, locations that may call, or non-leaf
  // methods are expected to reach the runtime.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
908
// A goto consumes and produces no values, so it needs no location summary.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}
912
913void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
914  HBasicBlock* successor = got->GetSuccessor();
915  DCHECK(!successor->IsExitBlock());
916
917  HBasicBlock* block = got->GetBlock();
918  HInstruction* previous = got->GetPrevious();
919
920  HLoopInformation* info = block->GetLoopInformation();
921  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
922    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
923    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
924    return;
925  }
926
927  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
928    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
929  }
930  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
931    __ b(codegen_->GetLabelOf(successor));
932  }
933}
934
// The exit instruction needs no location summary.
void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

// No code is generated for the exit instruction itself.
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}
942
// Emits the test-and-branch sequence for `instruction`'s boolean input.
// Three cases: a constant condition (branch statically resolved), a
// materialized condition (compare its output register against 0), or a
// non-materialized HCondition (fuse its compare with the branch).
// `always_true_target` is the label to use when the condition is statically
// true (nullptr if the true successor is the fall-through block);
// `false_target` likewise may be nullptr when falling through.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        // Use an immediate CMP when the constant can be encoded as a
        // shifter operand; otherwise stage it in the scratch register IP.
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  // Branch to the false target only when it is not the fall-through block.
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
994
995void LocationsBuilderARM::VisitIf(HIf* if_instr) {
996  LocationSummary* locations =
997      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
998  HInstruction* cond = if_instr->InputAt(0);
999  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1000    locations->SetInAt(0, Location::RequiresRegister());
1001  }
1002}
1003
1004void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1005  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1006  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1007  Label* always_true_target = true_target;
1008  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1009                                if_instr->IfTrueSuccessor())) {
1010    always_true_target = nullptr;
1011  }
1012  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1013                                if_instr->IfFalseSuccessor())) {
1014    false_target = nullptr;
1015  }
1016  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1017}
1018
1019void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1020  LocationSummary* locations = new (GetGraph()->GetArena())
1021      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1022  HInstruction* cond = deoptimize->InputAt(0);
1023  DCHECK(cond->IsCondition());
1024  if (cond->AsCondition()->NeedsMaterialization()) {
1025    locations->SetInAt(0, Location::RequiresRegister());
1026  }
1027}
1028
1029void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1030  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1031      DeoptimizationSlowPathARM(deoptimize);
1032  codegen_->AddSlowPath(slow_path);
1033  Label* slow_path_entry = slow_path->GetEntryLabel();
1034  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1035}
1036
1037void LocationsBuilderARM::VisitCondition(HCondition* cond) {
1038  LocationSummary* locations =
1039      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
1040  locations->SetInAt(0, Location::RequiresRegister());
1041  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
1042  if (cond->NeedsMaterialization()) {
1043    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1044  }
1045}
1046
// Generates code for a materialized condition: compares the two inputs and
// writes 1 (true) or 0 (false) to the output register via an IT/ELSE block.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  // A non-materialized condition is fused into its user's branch
  // (see GenerateTestAndBranch) and emits nothing here.
  if (!cond->NeedsMaterialization()) return;
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    // Prefer an immediate CMP when the constant is encodable; otherwise
    // stage the constant in the scratch register IP.
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // IT/ELSE block: out = 1 under the condition, out = 0 under its opposite.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}
1072
// The concrete comparison visitors below all delegate to the shared
// HCondition handling above; the specific condition kind is read off the
// instruction (GetCondition()) when code is generated.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1120
// An HLocal declaration needs no location summary of its own.
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // Locals may only be declared in the entry block.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

// Loading a local needs no location summary; the copy from the local's
// stack slot is emitted by CodeGeneratorARM::Move at the use site.
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1137
1138void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1139  LocationSummary* locations =
1140      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1141  switch (store->InputAt(1)->GetType()) {
1142    case Primitive::kPrimBoolean:
1143    case Primitive::kPrimByte:
1144    case Primitive::kPrimChar:
1145    case Primitive::kPrimShort:
1146    case Primitive::kPrimInt:
1147    case Primitive::kPrimNot:
1148    case Primitive::kPrimFloat:
1149      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1150      break;
1151
1152    case Primitive::kPrimLong:
1153    case Primitive::kPrimDouble:
1154      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1155      break;
1156
1157    default:
1158      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1159  }
1160}
1161
// Nothing to emit: the locations builder constrained the stored value to
// the local's stack slot, so the move happens as part of input placement.
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}
1165
// Constant instructions occupy no register at definition time: each gets a
// constant location and is materialized at its use site (see
// CodeGeneratorARM::Move), so all code generator visitors below are empty.
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1220
// A memory barrier has no inputs or outputs, so no location summary.
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

// Delegates to GenerateMemoryBarrier with the kind carried by the instruction.
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1228
// A void return has no inputs, so no location summary.
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

// Pins the returned value to the location dictated by the calling
// convention for its type, so no extra move is needed at the return.
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}

// The value is already in the return location (see the builder above);
// only the frame teardown is emitted here.
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1248
// Builds locations for a static/direct invoke, giving intrinsics a chance
// to claim the call before falling back to the generic invoke handling.
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    LocationSummary* locations = invoke->GetLocations();
    if (locations->CanCall()) {
      // NOTE(review): an intrinsic that may call keeps the current method
      // in a register — presumably for its slow path; confirm against the
      // ARM intrinsics code generator.
      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
    }
    return;
  }

  HandleInvoke(invoke);
}
1266
// Loads the current ArtMethod* into `reg` from its fixed spill slot at the
// bottom of the frame (kCurrentMethodStackOffset from SP).
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
1271
1272static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1273  if (invoke->GetLocations()->Intrinsified()) {
1274    IntrinsicCodeGeneratorARM intrinsic(codegen);
1275    intrinsic.Dispatch(invoke);
1276    return true;
1277  }
1278  return false;
1279}
1280
1281void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1282  // When we do not run baseline, explicit clinit checks triggered by static
1283  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1284  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1285
1286  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1287    return;
1288  }
1289
1290  LocationSummary* locations = invoke->GetLocations();
1291  codegen_->GenerateStaticOrDirectCall(
1292      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
1293  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1294}
1295
// Sets up the calling-convention-based location summary shared by all
// invoke kinds (arguments in convention locations, return in R0/S0, etc.).
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
1300
1301void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1302  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1303                                         codegen_->GetInstructionSetFeatures());
1304  if (intrinsic.TryDispatch(invoke)) {
1305    return;
1306  }
1307
1308  HandleInvoke(invoke);
1309}
1310
// Generates a virtual call: class load (serving as the implicit null
// check), vtable lookup, then an indirect call through the method's quick
// entry point. Falls back to nothing extra when an intrinsic applies.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  // The class load above faults on a null receiver; record it as the check.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1337
// Interface calls use the common invoke setup plus a hidden argument in
// R12, which is loaded with the dex method index at call time (see the
// code generator visitor).
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1343
// Generates an interface call: sets the hidden dex-method-index argument,
// loads the receiver's class (implicit null check), looks the method up in
// the IMT, and calls through its quick entry point.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above faults on a null receiver; record it as the check.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1376
// Builds locations for arithmetic negation per result type.
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // The long negation sequence writes out.hi before reading in.hi
      // (see the code generator visitor), so the output must not share
      // registers with the input.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1402
// Generates arithmetic negation. Integers use RSB (0 - x); longs use a
// flag-dependent RSBS/SBC/SUB sequence; floats/doubles use VNEG.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()))
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1449
1450void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1451  Primitive::Type result_type = conversion->GetResultType();
1452  Primitive::Type input_type = conversion->GetInputType();
1453  DCHECK_NE(result_type, input_type);
1454
1455  // The float-to-long, double-to-long and long-to-float type conversions
1456  // rely on a call to the runtime.
1457  LocationSummary::CallKind call_kind =
1458      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1459        && result_type == Primitive::kPrimLong)
1460       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
1461      ? LocationSummary::kCall
1462      : LocationSummary::kNoCall;
1463  LocationSummary* locations =
1464      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1465
1466  // The Java language does not allow treating boolean as an integral type but
1467  // our bit representation makes it safe.
1468
1469  switch (result_type) {
1470    case Primitive::kPrimByte:
1471      switch (input_type) {
1472        case Primitive::kPrimBoolean:
1473          // Boolean input is a result of code transformations.
1474        case Primitive::kPrimShort:
1475        case Primitive::kPrimInt:
1476        case Primitive::kPrimChar:
1477          // Processing a Dex `int-to-byte' instruction.
1478          locations->SetInAt(0, Location::RequiresRegister());
1479          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1480          break;
1481
1482        default:
1483          LOG(FATAL) << "Unexpected type conversion from " << input_type
1484                     << " to " << result_type;
1485      }
1486      break;
1487
1488    case Primitive::kPrimShort:
1489      switch (input_type) {
1490        case Primitive::kPrimBoolean:
1491          // Boolean input is a result of code transformations.
1492        case Primitive::kPrimByte:
1493        case Primitive::kPrimInt:
1494        case Primitive::kPrimChar:
1495          // Processing a Dex `int-to-short' instruction.
1496          locations->SetInAt(0, Location::RequiresRegister());
1497          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1498          break;
1499
1500        default:
1501          LOG(FATAL) << "Unexpected type conversion from " << input_type
1502                     << " to " << result_type;
1503      }
1504      break;
1505
1506    case Primitive::kPrimInt:
1507      switch (input_type) {
1508        case Primitive::kPrimLong:
1509          // Processing a Dex `long-to-int' instruction.
1510          locations->SetInAt(0, Location::Any());
1511          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1512          break;
1513
1514        case Primitive::kPrimFloat:
1515          // Processing a Dex `float-to-int' instruction.
1516          locations->SetInAt(0, Location::RequiresFpuRegister());
1517          locations->SetOut(Location::RequiresRegister());
1518          locations->AddTemp(Location::RequiresFpuRegister());
1519          break;
1520
1521        case Primitive::kPrimDouble:
1522          // Processing a Dex `double-to-int' instruction.
1523          locations->SetInAt(0, Location::RequiresFpuRegister());
1524          locations->SetOut(Location::RequiresRegister());
1525          locations->AddTemp(Location::RequiresFpuRegister());
1526          break;
1527
1528        default:
1529          LOG(FATAL) << "Unexpected type conversion from " << input_type
1530                     << " to " << result_type;
1531      }
1532      break;
1533
1534    case Primitive::kPrimLong:
1535      switch (input_type) {
1536        case Primitive::kPrimBoolean:
1537          // Boolean input is a result of code transformations.
1538        case Primitive::kPrimByte:
1539        case Primitive::kPrimShort:
1540        case Primitive::kPrimInt:
1541        case Primitive::kPrimChar:
1542          // Processing a Dex `int-to-long' instruction.
1543          locations->SetInAt(0, Location::RequiresRegister());
1544          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1545          break;
1546
1547        case Primitive::kPrimFloat: {
1548          // Processing a Dex `float-to-long' instruction.
1549          InvokeRuntimeCallingConvention calling_convention;
1550          locations->SetInAt(0, Location::FpuRegisterLocation(
1551              calling_convention.GetFpuRegisterAt(0)));
1552          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1553          break;
1554        }
1555
1556        case Primitive::kPrimDouble: {
1557          // Processing a Dex `double-to-long' instruction.
1558          InvokeRuntimeCallingConvention calling_convention;
1559          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1560              calling_convention.GetFpuRegisterAt(0),
1561              calling_convention.GetFpuRegisterAt(1)));
1562          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1563          break;
1564        }
1565
1566        default:
1567          LOG(FATAL) << "Unexpected type conversion from " << input_type
1568                     << " to " << result_type;
1569      }
1570      break;
1571
1572    case Primitive::kPrimChar:
1573      switch (input_type) {
1574        case Primitive::kPrimBoolean:
1575          // Boolean input is a result of code transformations.
1576        case Primitive::kPrimByte:
1577        case Primitive::kPrimShort:
1578        case Primitive::kPrimInt:
1579          // Processing a Dex `int-to-char' instruction.
1580          locations->SetInAt(0, Location::RequiresRegister());
1581          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1582          break;
1583
1584        default:
1585          LOG(FATAL) << "Unexpected type conversion from " << input_type
1586                     << " to " << result_type;
1587      }
1588      break;
1589
1590    case Primitive::kPrimFloat:
1591      switch (input_type) {
1592        case Primitive::kPrimBoolean:
1593          // Boolean input is a result of code transformations.
1594        case Primitive::kPrimByte:
1595        case Primitive::kPrimShort:
1596        case Primitive::kPrimInt:
1597        case Primitive::kPrimChar:
1598          // Processing a Dex `int-to-float' instruction.
1599          locations->SetInAt(0, Location::RequiresRegister());
1600          locations->SetOut(Location::RequiresFpuRegister());
1601          break;
1602
1603        case Primitive::kPrimLong: {
1604          // Processing a Dex `long-to-float' instruction.
1605          InvokeRuntimeCallingConvention calling_convention;
1606          locations->SetInAt(0, Location::RegisterPairLocation(
1607              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
1608          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1609          break;
1610        }
1611
1612        case Primitive::kPrimDouble:
1613          // Processing a Dex `double-to-float' instruction.
1614          locations->SetInAt(0, Location::RequiresFpuRegister());
1615          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1616          break;
1617
1618        default:
1619          LOG(FATAL) << "Unexpected type conversion from " << input_type
1620                     << " to " << result_type;
1621      };
1622      break;
1623
1624    case Primitive::kPrimDouble:
1625      switch (input_type) {
1626        case Primitive::kPrimBoolean:
1627          // Boolean input is a result of code transformations.
1628        case Primitive::kPrimByte:
1629        case Primitive::kPrimShort:
1630        case Primitive::kPrimInt:
1631        case Primitive::kPrimChar:
1632          // Processing a Dex `int-to-double' instruction.
1633          locations->SetInAt(0, Location::RequiresRegister());
1634          locations->SetOut(Location::RequiresFpuRegister());
1635          break;
1636
1637        case Primitive::kPrimLong:
1638          // Processing a Dex `long-to-double' instruction.
1639          locations->SetInAt(0, Location::RequiresRegister());
1640          locations->SetOut(Location::RequiresFpuRegister());
1641          locations->AddTemp(Location::RequiresRegister());
1642          locations->AddTemp(Location::RequiresRegister());
1643          locations->AddTemp(Location::RequiresFpuRegister());
1644          break;
1645
1646        case Primitive::kPrimFloat:
1647          // Processing a Dex `float-to-double' instruction.
1648          locations->SetInAt(0, Location::RequiresFpuRegister());
1649          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1650          break;
1651
1652        default:
1653          LOG(FATAL) << "Unexpected type conversion from " << input_type
1654                     << " to " << result_type;
1655      };
1656      break;
1657
1658    default:
1659      LOG(FATAL) << "Unexpected type conversion from " << input_type
1660                 << " to " << result_type;
1661  }
1662}
1663
// Emits ARM code for an HTypeConversion laid out by
// LocationsBuilderARM::VisitTypeConversion.  Narrowing integral conversions
// are done with bitfield extracts (sbfx/ubfx), int<->FP conversions with VFP
// vcvt* instructions, and the conversions with no direct VFP support here
// (float-to-long, double-to-long, long-to-float) go through quick runtime
// entry points.  long-to-double is expanded inline using temporaries.
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend the low 8 bits into the whole register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend the low 16 bits into the whole register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          DCHECK(out.IsRegister());
          // The input may live in a register pair, a double stack slot, or a
          // constant; in each case only the low 32 bits are kept.
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // vcvtis leaves its result in an S register, so convert in a
          // scratch S register and then move the integer bits to the core
          // output register.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Same pattern as float-to-int, via a scratch D register whose low
          // S half receives the converted integer.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // No inline sequence; operands/result were pinned to the runtime
          // calling convention by the locations builder.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend the low 16 bits: char is unsigned.
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          // Move the integer bits into the output S register, then convert
          // in place.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          // Integer bits go into the low S half of the output D register,
          // then are converted in place.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Computed inline as double(high) * 2^32 + unsigned-double(low).
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);

          // out_d = int-to-double(high)
          __ vmovsr(out_s, high);
          __ vcvtdi(out_d, out_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into `temp_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp_d, constant_low, constant_high);
          // out_d = out_d * 2^32
          __ vmuld(out_d, out_d, temp_d);
          // temp_d = unsigned-to-double(low)
          __ vmovsr(temp_s, low);
          __ vcvtdu(temp_d, temp_s);
          // out_d = out_d + temp_d
          __ vaddd(out_d, out_d, temp_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1903
1904void LocationsBuilderARM::VisitAdd(HAdd* add) {
1905  LocationSummary* locations =
1906      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1907  switch (add->GetResultType()) {
1908    case Primitive::kPrimInt: {
1909      locations->SetInAt(0, Location::RequiresRegister());
1910      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1911      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1912      break;
1913    }
1914
1915    case Primitive::kPrimLong: {
1916      locations->SetInAt(0, Location::RequiresRegister());
1917      locations->SetInAt(1, Location::RequiresRegister());
1918      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1919      break;
1920    }
1921
1922    case Primitive::kPrimFloat:
1923    case Primitive::kPrimDouble: {
1924      locations->SetInAt(0, Location::RequiresFpuRegister());
1925      locations->SetInAt(1, Location::RequiresFpuRegister());
1926      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1927      break;
1928    }
1929
1930    default:
1931      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1932  }
1933}
1934
// Emits the addition itself.  Int uses add or AddConstant (which selects an
// encodable immediate sequence); long uses adds/adc so the carry propagates
// between the register-pair halves; float/double use VFP vadds/vaddd.
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
  LocationSummary* locations = add->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt:
      if (second.IsRegister()) {
        __ add(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Constant right-hand side (see RegisterOrConstant in the builder).
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // Low halves first (adds sets the carry flag), then high halves with
      // carry-in (adc).
      __ adds(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ adc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat:
      __ vadds(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}
1980
1981void LocationsBuilderARM::VisitSub(HSub* sub) {
1982  LocationSummary* locations =
1983      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
1984  switch (sub->GetResultType()) {
1985    case Primitive::kPrimInt: {
1986      locations->SetInAt(0, Location::RequiresRegister());
1987      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
1988      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1989      break;
1990    }
1991
1992    case Primitive::kPrimLong: {
1993      locations->SetInAt(0, Location::RequiresRegister());
1994      locations->SetInAt(1, Location::RequiresRegister());
1995      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1996      break;
1997    }
1998    case Primitive::kPrimFloat:
1999    case Primitive::kPrimDouble: {
2000      locations->SetInAt(0, Location::RequiresFpuRegister());
2001      locations->SetInAt(1, Location::RequiresFpuRegister());
2002      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2003      break;
2004    }
2005    default:
2006      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2007  }
2008}
2009
// Emits the subtraction itself.  Int uses sub, or AddConstant with the
// negated constant; long uses subs/sbc so the borrow propagates between the
// register-pair halves; float/double use VFP vsubs/vsubd.
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
  LocationSummary* locations = sub->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ sub(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // x - c is emitted as x + (-c).
        // NOTE(review): negating the constant is signed overflow (UB) when
        // it equals INT32_MIN -- confirm that case cannot reach here.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       -second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;
    }

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // Low halves first (subs sets the borrow/carry flag), then high halves
      // with borrow-in (sbc).
      __ subs(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ sbc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vsubs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }


    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}
2059
2060void LocationsBuilderARM::VisitMul(HMul* mul) {
2061  LocationSummary* locations =
2062      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2063  switch (mul->GetResultType()) {
2064    case Primitive::kPrimInt:
2065    case Primitive::kPrimLong:  {
2066      locations->SetInAt(0, Location::RequiresRegister());
2067      locations->SetInAt(1, Location::RequiresRegister());
2068      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2069      break;
2070    }
2071
2072    case Primitive::kPrimFloat:
2073    case Primitive::kPrimDouble: {
2074      locations->SetInAt(0, Location::RequiresFpuRegister());
2075      locations->SetInAt(1, Location::RequiresFpuRegister());
2076      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2077      break;
2078    }
2079
2080    default:
2081      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2082  }
2083}
2084
// Emits the multiplication.  Int uses a single mul; long is expanded into a
// 32x32->64 unsigned multiply of the low words (umull) plus the two cross
// products folded into the high word (mul/mla/add); float/double use VFP
// vmuls/vmuld.  IP serves as scratch for the long expansion.
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2146
// Emits the trivial sequences for an integer div/rem whose constant divisor
// is +1 or -1: the remainder is always 0; the quotient is the dividend
// itself (imm == 1) or its negation (imm == -1).
void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  DCHECK(imm == 1 || imm == -1);

  if (instruction->IsRem()) {
    // x % +/-1 == 0.
    __ LoadImmediate(out, 0);
  } else {
    if (imm == 1) {
      __ Mov(out, dividend);
    } else {
      // x / -1 == -x; rsb computes 0 - dividend.
      __ rsb(out, dividend, ShifterOperand(0));
    }
  }
}
2170
2171void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2172  DCHECK(instruction->IsDiv() || instruction->IsRem());
2173  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2174
2175  LocationSummary* locations = instruction->GetLocations();
2176  Location second = locations->InAt(1);
2177  DCHECK(second.IsConstant());
2178
2179  Register out = locations->Out().AsRegister<Register>();
2180  Register dividend = locations->InAt(0).AsRegister<Register>();
2181  Register temp = locations->GetTemp(0).AsRegister<Register>();
2182  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2183  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
2184  DCHECK(IsPowerOfTwo(abs_imm));
2185  int ctz_imm = CTZ(abs_imm);
2186
2187  if (ctz_imm == 1) {
2188    __ Lsr(temp, dividend, 32 - ctz_imm);
2189  } else {
2190    __ Asr(temp, dividend, 31);
2191    __ Lsr(temp, temp, 32 - ctz_imm);
2192  }
2193  __ add(out, temp, ShifterOperand(dividend));
2194
2195  if (instruction->IsDiv()) {
2196    __ Asr(out, out, ctz_imm);
2197    if (imm < 0) {
2198      __ rsb(out, out, ShifterOperand(0));
2199    }
2200  } else {
2201    __ ubfx(out, out, 0, ctz_imm);
2202    __ sub(out, out, ShifterOperand(temp));
2203  }
2204}
2205
// Emits an integer div/rem by an arbitrary constant (|imm| >= 2, not a power
// of two) using the "magic number" technique (Hacker's Delight, ch. 10):
// multiply by a precomputed 32-bit reciprocal, keep the high half of the
// 64-bit product, apply a sign correction and an arithmetic shift.  For rem,
// the quotient is folded back with mls: out = dividend - quotient * imm.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 <- high 32 bits of dividend * magic; temp2 receives the (unused)
  // low half.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high product when the signed magic constant wrapped: add or
  // subtract the dividend depending on the signs of imm and magic.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // quotient = temp1 - (temp1 >> 31), i.e. add 1 when temp1 is negative so
    // the division rounds towards zero.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // remainder = dividend - quotient * imm.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2246
2247void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2248  DCHECK(instruction->IsDiv() || instruction->IsRem());
2249  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2250
2251  LocationSummary* locations = instruction->GetLocations();
2252  Location second = locations->InAt(1);
2253  DCHECK(second.IsConstant());
2254
2255  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2256  if (imm == 0) {
2257    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2258  } else if (imm == 1 || imm == -1) {
2259    DivRemOneOrMinusOne(instruction);
2260  } else if (IsPowerOfTwo(std::abs(imm))) {
2261    DivRemByPowerOfTwo(instruction);
2262  } else {
2263    DCHECK(imm <= -2 || imm >= 2);
2264    GenerateDivRemWithAnyConstant(instruction);
2265  }
2266}
2267
2268void LocationsBuilderARM::VisitDiv(HDiv* div) {
2269  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2270  if (div->GetResultType() == Primitive::kPrimLong) {
2271    // pLdiv runtime call.
2272    call_kind = LocationSummary::kCall;
2273  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2274    // sdiv will be replaced by other instruction sequence.
2275  } else if (div->GetResultType() == Primitive::kPrimInt &&
2276             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2277    // pIdivmod runtime call.
2278    call_kind = LocationSummary::kCall;
2279  }
2280
2281  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2282
2283  switch (div->GetResultType()) {
2284    case Primitive::kPrimInt: {
2285      if (div->InputAt(1)->IsConstant()) {
2286        locations->SetInAt(0, Location::RequiresRegister());
2287        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2288        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2289        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2290        if (abs_imm <= 1) {
2291          // No temp register required.
2292        } else {
2293          locations->AddTemp(Location::RequiresRegister());
2294          if (!IsPowerOfTwo(abs_imm)) {
2295            locations->AddTemp(Location::RequiresRegister());
2296          }
2297        }
2298      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2299        locations->SetInAt(0, Location::RequiresRegister());
2300        locations->SetInAt(1, Location::RequiresRegister());
2301        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2302      } else {
2303        InvokeRuntimeCallingConvention calling_convention;
2304        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2305        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2306        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2307        //       we only need the former.
2308        locations->SetOut(Location::RegisterLocation(R0));
2309      }
2310      break;
2311    }
2312    case Primitive::kPrimLong: {
2313      InvokeRuntimeCallingConvention calling_convention;
2314      locations->SetInAt(0, Location::RegisterPairLocation(
2315          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2316      locations->SetInAt(1, Location::RegisterPairLocation(
2317          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2318      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2319      break;
2320    }
2321    case Primitive::kPrimFloat:
2322    case Primitive::kPrimDouble: {
2323      locations->SetInAt(0, Location::RequiresFpuRegister());
2324      locations->SetInAt(1, Location::RequiresFpuRegister());
2325      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2326      break;
2327    }
2328
2329    default:
2330      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2331  }
2332}
2333
// Emits the division.  Int: inline constant sequence, hardware sdiv, or the
// pIdivmod runtime call; long: always the pLdiv runtime call; float/double:
// VFP vdivs/vdivd.  The DCHECKs verify that the locations builder pinned the
// runtime-call operands to the calling-convention registers.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2390
2391void LocationsBuilderARM::VisitRem(HRem* rem) {
2392  Primitive::Type type = rem->GetResultType();
2393
2394  // Most remainders are implemented in the runtime.
2395  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2396  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2397    // sdiv will be replaced by other instruction sequence.
2398    call_kind = LocationSummary::kNoCall;
2399  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2400             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2401    // Have hardware divide instruction for int, do it with three instructions.
2402    call_kind = LocationSummary::kNoCall;
2403  }
2404
2405  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2406
2407  switch (type) {
2408    case Primitive::kPrimInt: {
2409      if (rem->InputAt(1)->IsConstant()) {
2410        locations->SetInAt(0, Location::RequiresRegister());
2411        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2412        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2413        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2414        if (abs_imm <= 1) {
2415          // No temp register required.
2416        } else {
2417          locations->AddTemp(Location::RequiresRegister());
2418          if (!IsPowerOfTwo(abs_imm)) {
2419            locations->AddTemp(Location::RequiresRegister());
2420          }
2421        }
2422      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2423        locations->SetInAt(0, Location::RequiresRegister());
2424        locations->SetInAt(1, Location::RequiresRegister());
2425        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2426        locations->AddTemp(Location::RequiresRegister());
2427      } else {
2428        InvokeRuntimeCallingConvention calling_convention;
2429        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2430        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2431        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2432        //       we only need the latter.
2433        locations->SetOut(Location::RegisterLocation(R1));
2434      }
2435      break;
2436    }
2437    case Primitive::kPrimLong: {
2438      InvokeRuntimeCallingConvention calling_convention;
2439      locations->SetInAt(0, Location::RegisterPairLocation(
2440          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2441      locations->SetInAt(1, Location::RegisterPairLocation(
2442          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2443      // The runtime helper puts the output in R2,R3.
2444      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2445      break;
2446    }
2447    case Primitive::kPrimFloat: {
2448      InvokeRuntimeCallingConvention calling_convention;
2449      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2450      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2451      locations->SetOut(Location::FpuRegisterLocation(S0));
2452      break;
2453    }
2454
2455    case Primitive::kPrimDouble: {
2456      InvokeRuntimeCallingConvention calling_convention;
2457      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2458          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2459      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2460          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2461      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2462      break;
2463    }
2464
2465    default:
2466      LOG(FATAL) << "Unexpected rem type " << type;
2467  }
2468}
2469
2470void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2471  LocationSummary* locations = rem->GetLocations();
2472  Location out = locations->Out();
2473  Location first = locations->InAt(0);
2474  Location second = locations->InAt(1);
2475
2476  Primitive::Type type = rem->GetResultType();
2477  switch (type) {
2478    case Primitive::kPrimInt: {
2479        if (second.IsConstant()) {
2480          GenerateDivRemConstantIntegral(rem);
2481        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2482        Register reg1 = first.AsRegister<Register>();
2483        Register reg2 = second.AsRegister<Register>();
2484        Register temp = locations->GetTemp(0).AsRegister<Register>();
2485
2486        // temp = reg1 / reg2  (integer division)
2487        // temp = temp * reg2
2488        // dest = reg1 - temp
2489        __ sdiv(temp, reg1, reg2);
2490        __ mul(temp, temp, reg2);
2491        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2492      } else {
2493        InvokeRuntimeCallingConvention calling_convention;
2494        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2495        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2496        DCHECK_EQ(R1, out.AsRegister<Register>());
2497
2498        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2499      }
2500      break;
2501    }
2502
2503    case Primitive::kPrimLong: {
2504      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2505      break;
2506    }
2507
2508    case Primitive::kPrimFloat: {
2509      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2510      break;
2511    }
2512
2513    case Primitive::kPrimDouble: {
2514      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2515      break;
2516    }
2517
2518    default:
2519      LOG(FATAL) << "Unexpected rem type " << type;
2520  }
2521}
2522
2523void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2524  LocationSummary* locations =
2525      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2526  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2527  if (instruction->HasUses()) {
2528    locations->SetOut(Location::SameAsFirstInput());
2529  }
2530}
2531
2532void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2533  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2534  codegen_->AddSlowPath(slow_path);
2535
2536  LocationSummary* locations = instruction->GetLocations();
2537  Location value = locations->InAt(0);
2538
2539  switch (instruction->GetType()) {
2540    case Primitive::kPrimInt: {
2541      if (value.IsRegister()) {
2542        __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2543        __ b(slow_path->GetEntryLabel(), EQ);
2544      } else {
2545        DCHECK(value.IsConstant()) << value;
2546        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2547          __ b(slow_path->GetEntryLabel());
2548        }
2549      }
2550      break;
2551    }
2552    case Primitive::kPrimLong: {
2553      if (value.IsRegisterPair()) {
2554        __ orrs(IP,
2555                value.AsRegisterPairLow<Register>(),
2556                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2557        __ b(slow_path->GetEntryLabel(), EQ);
2558      } else {
2559        DCHECK(value.IsConstant()) << value;
2560        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2561          __ b(slow_path->GetEntryLabel());
2562        }
2563      }
2564      break;
2565    default:
2566      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2567    }
2568  }
2569}
2570
2571void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2572  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2573
2574  LocationSummary* locations =
2575      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2576
2577  switch (op->GetResultType()) {
2578    case Primitive::kPrimInt: {
2579      locations->SetInAt(0, Location::RequiresRegister());
2580      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2581      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2582      break;
2583    }
2584    case Primitive::kPrimLong: {
2585      locations->SetInAt(0, Location::RequiresRegister());
2586      locations->SetInAt(1, Location::RequiresRegister());
2587      locations->AddTemp(Location::RequiresRegister());
2588      locations->SetOut(Location::RequiresRegister());
2589      break;
2590    }
2591    default:
2592      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2593  }
2594}
2595
// Shared code emitter for Shl/Shr/UShr. Int shifts are a single instruction
// (after masking the count); long shifts are an in-line multi-instruction
// sequence combining the two 32-bit halves, using an IT block to handle
// distances >= 32.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // NOTE(review): this writes the masked count back into second_reg,
        // clobbering an input register — confirm no later use of the
        // unmasked value is possible here.
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        // Constant shift distance: mask it at compile time instead.
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      // In each branch below the count is first masked to 6 bits (63), again
      // clobbering second_reg — see the NOTE above.
      if (op->IsShl()) {
        // Shift the high part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        // (subs sets flags; PL means count - 32 >= 0, i.e. count >= 32).
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2692
// Shl shares its location constraints with Shr/UShr via HandleShift.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2696
// Shl shares its code emission with Shr/UShr via HandleShift.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2700
// Shr shares its location constraints with Shl/UShr via HandleShift.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2704
// Shr shares its code emission with Shl/UShr via HandleShift.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2708
// UShr shares its location constraints with Shl/Shr via HandleShift.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2712
// UShr shares its code emission with Shl/Shr via HandleShift.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2716
// NewInstance always calls into the runtime: reserve the first two argument
// registers as temps (type index + current method, see the code generator
// below) and fix the result to R0, the runtime return register.
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}
2725
// Emits the runtime call for NewInstance: arg0 = type index, arg1 = the
// current ArtMethod*. The entry point to invoke is stored on the instruction.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2735
// NewArray always calls into the runtime: the array length flows in as
// input 0 pinned to argument register 1, while argument registers 0 and 2
// (type index and current method, see the code generator below) are temps.
// The new array comes back in R0.
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(R0));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
2745
// Emits the runtime call for NewArray: arg0 = type index, arg1 = length
// (placed there by the locations builder), arg2 = the current ArtMethod*.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2755
// A parameter's location is assigned by the parameter visitor. Stack-passed
// parameters live in the caller's frame, so their slot index is rebased by
// this method's frame size to address them from the callee's SP.
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}
2767
// No code is emitted for a parameter: the locations builder already exposed
// its (register or stack) location.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
2772
// The current method is always available in the dedicated method register
// (R0, see kMethodRegisterArgument); just expose that as the output.
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
2778
// No code needed: the method pointer already sits in its fixed register.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
2782
2783void LocationsBuilderARM::VisitNot(HNot* not_) {
2784  LocationSummary* locations =
2785      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
2786  locations->SetInAt(0, Location::RequiresRegister());
2787  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2788}
2789
// Emits bitwise NOT: a single mvn for ints, and one mvn per half for longs.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
2810
2811void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
2812  LocationSummary* locations =
2813      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
2814  locations->SetInAt(0, Location::RequiresRegister());
2815  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2816}
2817
// Emits boolean NOT as an XOR with 1, flipping a 0/1 value.
void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}
2824
// Builds locations for HCompare (three-way compare producing -1/0/1).
// Long and floating-point operands are handled; both produce a core
// register result.
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}
2847
// Emits HCompare, producing -1 (less), 0 (equal), or 1 (greater) in a core
// register. For longs: compare high words signed, then low words unsigned.
// For floats/doubles: VFP compare, flags transferred to APSR; unordered (VS)
// resolves to the instruction's gt/lt bias. The shared tail below the switch
// translates the final flags into the -1/0/1 result.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  NearLabel less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      // Preload the "equal" result before the compare (flags not yet live).
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: flags from the last compare decide the result.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2896
// Phis take any location for each input and produce any location; they are
// resolved by the register allocator, not by emitted code.
void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}
2905
// Phis never reach code generation; they are eliminated before this point,
// so hitting this visitor is a compiler bug.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
2910
// Emits a DMB memory barrier. StoreStore maps to the cheaper ISHST variant;
// all other supported kinds use the full inner-shareable ISH barrier.
void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
  // TODO (ported from quick): revisit Arm barrier kinds
  DmbOptions flavour = DmbOptions::ISH;  // quiet c++ warnings
  switch (kind) {
    case MemBarrierKind::kAnyStore:
    case MemBarrierKind::kLoadAny:
    case MemBarrierKind::kAnyAny: {
      flavour = DmbOptions::ISH;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      flavour = DmbOptions::ISHST;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ dmb(flavour);
}
2930
// Emits an atomic 64-bit load via ldrexd into out_lo/out_hi.
// When an offset is needed, out_lo doubles as a scratch register to form
// addr + offset in IP before the ldrexd overwrites it.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2942
// Emits an atomic 64-bit store using an ldrexd/strexd retry loop:
// strexd only succeeds while this CPU still holds the exclusive monitor
// from the preceding ldrexd, so on failure (temp1 != 0) we loop.
// temp1 also serves as the scratch for the offset computation and as the
// strexd status register; temp2 receives the discarded ldrexd high word.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  NearLabel fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
2965
// Builds locations for instance/static field stores. Input 0 is the object
// (or class) holding the field, input 1 the value. Extra temps are needed
// either for the GC write barrier (reference stores) or for the
// ldrexd/strexd sequence of wide volatile stores on CPUs without atomic
// ldrd/strd.
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3006
// Emits an instance/static field store. Volatile stores are bracketed by an
// AnyStore barrier before and an AnyAny barrier after; wide volatile stores
// on CPUs without atomic ldrd/strd use the ldrexd/strexd loop. Reference
// stores additionally mark the GC card for the written object.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    // Order this store after all previous memory accesses.
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single-copy-atomic strd available: use the exclusive-store loop.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Copy the double into two core registers so the exclusive-store
        // loop can write it.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    // Order all subsequent memory accesses after this store.
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3106
// Builds locations for instance/static field loads. Input 0 is the object
// (or class) holding the field. Volatile double loads on CPUs without atomic
// ldrd need two temps for the ldrexd result; volatile long loads mark the
// output as overlapping.
void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}
3135
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  // Emits the load for an instance or static field get. `base` holds the object
  // (instance get) or the declaring class (static get); the field lives at the
  // given byte offset from it. Volatile wide loads without atomic ldrd support
  // go through GenerateWideAtomicLoad (ldrexd-based).
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      // Volatile longs need an atomic 64-bit load when plain ldrd is not
      // single-copy atomic on this core.
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      // Doubles record the implicit null check inside this case because the
      // atomic path emits multiple instructions before the value is complete.
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into the two core temps, then move into the D register.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  // A volatile load must not be reordered before subsequent memory accesses.
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3220
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Instance field stores share their location logic with static field stores.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3224
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Shared with static field stores; the null-ness hint elides the card-mark null test.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3228
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Instance field loads share their location logic with static field loads.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3232
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Shared with static field loads.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3236
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Static field loads share their location logic with instance field loads.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3240
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Shared with instance field loads.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3244
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Static field stores share their location logic with instance field stores.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3248
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Shared with instance field stores; the null-ness hint elides the card-mark null test.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3252
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  // The checked reference comes in a register; when the check has uses, the
  // (now known non-null) reference is passed through unchanged as the output.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3261
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  // No explicit test is emitted: a word load from [obj, #0] faults when obj is
  // null, and the recorded PC lets the runtime map the fault back to this
  // instruction. Skipped entirely when a nearby user's memory access can act
  // as the null check instead.
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  // Probe load into the scratch register; the value itself is discarded.
  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3271
void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
  // Compares the reference against null and branches to the throwing slow path.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
}
3282
3283void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3284  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3285    GenerateImplicitNullCheck(instruction);
3286  } else {
3287    GenerateExplicitNullCheck(instruction);
3288  }
3289}
3290
3291void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3292  LocationSummary* locations =
3293      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3294  locations->SetInAt(0, Location::RequiresRegister());
3295  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3296  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3297    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3298  } else {
3299    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3300  }
3301}
3302
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  // Emits the load for an array element. For a constant index the element
  // offset is folded into the load; for a register index, IP is used to form
  // base + (index << scale), with the data offset applied by the load itself.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded as plain words (compressed-pointer-sized heap refs).
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // The element load may double as the implicit null check for the array.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3429
3430void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3431  Primitive::Type value_type = instruction->GetComponentType();
3432
3433  bool needs_write_barrier =
3434      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3435  bool needs_runtime_call = instruction->NeedsTypeCheck();
3436
3437  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3438      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3439  if (needs_runtime_call) {
3440    InvokeRuntimeCallingConvention calling_convention;
3441    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3442    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3443    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3444  } else {
3445    locations->SetInAt(0, Location::RequiresRegister());
3446    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3447    if (Primitive::IsFloatingPointType(value_type)) {
3448      locations->SetInAt(2, Location::RequiresFpuRegister());
3449    } else {
3450      locations->SetInAt(2, Location::RequiresRegister());
3451    }
3452
3453    if (needs_write_barrier) {
3454      // Temporary registers for the write barrier.
3455      locations->AddTemp(Location::RequiresRegister());
3456      locations->AddTemp(Location::RequiresRegister());
3457    }
3458  }
3459}
3460
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  // Emits the store for an array element. Constant indexes fold the element
  // offset into the store; register indexes compute base + (index << scale)
  // into IP first. Reference stores needing a type check go through the
  // pAputObject runtime entrypoint; other reference stores mark the GC card.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // Recorded here (not at the end) so the PC maps to the store itself,
        // before any card-marking code.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // Type-checked reference store: delegate to the runtime.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3585
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
  // Array reference in, length out; the output may reuse the input register.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
3592
3593void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3594  LocationSummary* locations = instruction->GetLocations();
3595  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3596  Register obj = locations->InAt(0).AsRegister<Register>();
3597  Register out = locations->Out().AsRegister<Register>();
3598  __ LoadFromOffset(kLoadWord, out, obj, offset);
3599  codegen_->MaybeRecordImplicitNullCheck(instruction);
3600}
3601
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
  // Index and length each come in a register; when the check has uses, the
  // (in-bounds) index is passed through unchanged as the output.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3611
3612void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3613  LocationSummary* locations = instruction->GetLocations();
3614  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3615      instruction, locations->InAt(0), locations->InAt(1));
3616  codegen_->AddSlowPath(slow_path);
3617
3618  Register index = locations->InAt(0).AsRegister<Register>();
3619  Register length = locations->InAt(1).AsRegister<Register>();
3620
3621  __ cmp(index, ShifterOperand(length));
3622  __ b(slow_path->GetEntryLabel(), CS);
3623}
3624
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  // Marks the card-table entry covering `object` after a reference store.
  // Storing a null reference needs no card mark, hence the optional null test
  // on `value` when the compiler cannot prove it non-null.
  NearLabel is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // card = thread-local card-table base; temp = object address >> card shift.
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store the low byte of the card-table base itself as the mark; the runtime
  // presumably arranges for that byte to equal the dirty-card value.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3641
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // Temporaries carry no locations of their own.
  temp->SetLocations(nullptr);
}
3645
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}
3650
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  // Parallel moves are introduced after location building (e.g. by the
  // register allocator), so this visitor must never be reached.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3655
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Delegate to the parallel-move resolver, which sequences the moves.
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3659
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // No registers needed; the check only tests thread flags and may branch to a
  // slow path.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3663
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // A suspend check in a loop header is emitted at the back edge, and one in
  // the entry block is emitted by the following goto; only the remaining cases
  // emit code here.
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}
3677
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // Tests the thread's flags and branches to a (shared, lazily created) slow
  // path when any flag is set. With a null successor, execution falls through
  // to the slow path's return label; otherwise it continues at `successor`.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    // First emission for this check: create and register the slow path so that
    // later emissions (e.g. from multiple back edges) can reuse it.
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Load the 16-bit thread flags from the thread register and test for zero.
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3706
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  // The resolver emits code through the code generator's assembler.
  return codegen_->GetAssembler();
}
3710
void ParallelMoveResolverARM::EmitMove(size_t index) {
  // Emits a single move of the parallel-move set: any combination of core
  // register (pair), FPU register (pair), (double) stack slot, and constant
  // sources/destinations. IP and DTMP serve as scratch for memory-to-memory
  // moves.
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    // Core register -> core register or stack slot.
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    // Stack slot -> core register, FPU register, or stack slot (via IP).
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    // Single FPU register -> FPU register or stack slot.
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    // 64-bit stack slot -> stack slot (via DTMP), core pair, or FPU pair.
    if (destination.IsDoubleStackSlot()) {
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    // Core pair -> core pair (two moves) or 64-bit stack slot (strd-style).
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    // FPU pair (D register) -> FPU pair or 64-bit stack slot.
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant source: materialize by kind into a register or onto the stack.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Spill the raw bit pattern word by word via IP.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
3825
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  // Swap a core register with a stack slot, using IP as scratch.
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
3831
// Swaps the contents of the two stack slots at SP + `mem1` and SP + `mem2`.
// Needs two core scratch registers: IP plus one obtained from the resolver.
// If the second scratch had to be spilled, its push moved SP down by one
// word, so both SP-relative offsets must be rebased by kArmWordSize.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  // Compensate for the SP adjustment caused by spilling the scratch register.
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
3842
// Emits code that swaps the contents of the source and destination of the
// parallel move at `index`. IP (core) and DTMP (double-precision FP) serve
// as scratch registers, so neither may appear as a move operand.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core register <-> core register, via IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // Single-precision FP register <-> FP register, via IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core register pair <-> core register pair: park the source pair in DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core register pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    // The pair must be laid out as (even, even + 1) so that low_reg + 1 is
    // its high half (see ExpectedPairLayout).
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // Double-precision FP register <-> FP register, via DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // Double-precision FP register <-> double stack slot.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // Single-precision FP register <-> stack slot.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot, one 32-bit word at a time.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3915
// Saves the core scratch register `reg` on the stack so the resolver can
// temporarily reuse it (restored by RestoreScratch).
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
3919
// Restores the core scratch register `reg` previously saved by SpillScratch.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
3923
3924void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3925  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3926      ? LocationSummary::kCallOnSlowPath
3927      : LocationSummary::kNoCall;
3928  LocationSummary* locations =
3929      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3930  locations->SetInAt(0, Location::RequiresRegister());
3931  locations->SetOut(Location::RequiresRegister());
3932}
3933
// Materializes the class referenced by `cls` into the output register.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // The requested class is the one declaring the current method: load it
    // directly from the ArtMethod, no runtime call possible.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index]
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    // The slow path resolves the class when the dex-cache entry is null and,
    // if requested, also runs the class initialization check.
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    // A null cache entry means the class is not resolved yet.
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
3963
3964void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
3965  LocationSummary* locations =
3966      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
3967  locations->SetInAt(0, Location::RequiresRegister());
3968  if (check->HasUses()) {
3969    locations->SetOut(Location::SameAsFirstInput());
3970  }
3971}
3972
// Emits an explicit class-initialization check; the slow path performs the
// actual initialization via the runtime.
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), /* do_clinit= */ true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}
3981
// Fast-path check that the class in `class_reg` is initialized; branches to
// `slow_path` otherwise. Clobbers IP.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  // Any status strictly below kStatusInitialized needs the slow path.
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
3992
3993void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
3994  LocationSummary* locations =
3995      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
3996  locations->SetInAt(0, Location::RequiresRegister());
3997  locations->SetOut(Location::RequiresRegister());
3998}
3999
// Loads the string at the instruction's string index from the dex cache,
// falling back to the runtime (slow path) if it is not resolved yet.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_->dex_cache_strings_[string_index]
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // A null cache entry means the string is not resolved yet: go slow path.
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
4015
4016void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4017  LocationSummary* locations =
4018      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4019  locations->SetOut(Location::RequiresRegister());
4020}
4021
// Loads the pending exception from the current thread (TR) into the output
// register and clears the thread's exception field.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  // Store null back so the exception is consumed exactly once.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
4029
4030void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4031  LocationSummary* locations =
4032      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4033  InvokeRuntimeCallingConvention calling_convention;
4034  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4035}
4036
// Delivers the exception (already in the runtime calling-convention
// register) through the pDeliverException entrypoint.
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4041
4042void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4043  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4044      ? LocationSummary::kNoCall
4045      : LocationSummary::kCallOnSlowPath;
4046  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4047  locations->SetInAt(0, Location::RequiresRegister());
4048  locations->SetInAt(1, Location::RequiresRegister());
4049  // The out register is used as a temporary, so it overlaps with the inputs.
4050  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4051}
4052
// Materializes `obj instanceof cls` as 0 or 1 in the output register.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  NearLabel done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // Avoid the null check when we statically know `obj` is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // Only bind `zero` when at least one branch above can actually target it.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4096
4097void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4098  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4099      instruction, LocationSummary::kCallOnSlowPath);
4100  locations->SetInAt(0, Location::RequiresRegister());
4101  locations->SetInAt(1, Location::RequiresRegister());
4102  locations->AddTemp(Location::RequiresRegister());
4103}
4104
// Emits a check-cast: falls through on success, otherwise takes the
// TypeCheckSlowPathARM, which performs the full check via the runtime.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  NearLabel done;
  // A null object trivially passes the cast.
  // Avoid the null check when we statically know `obj` is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &done);
  }
  // Compare the class of `obj` with `cls`; a mismatch goes to the slow path.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
  if (instruction->MustDoNullCheck()) {
    __ Bind(&done);
  }
}
4130
4131void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4132  LocationSummary* locations =
4133      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4134  InvokeRuntimeCallingConvention calling_convention;
4135  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4136}
4137
4138void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4139  codegen_->InvokeRuntime(instruction->IsEnter()
4140        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4141      instruction,
4142      instruction->GetDexPc(),
4143      nullptr);
4144}
4145
// And, Or and Xor share a single locations builder: HandleBitwiseOperation.
void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4149
4150void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4151  LocationSummary* locations =
4152      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4153  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4154         || instruction->GetResultType() == Primitive::kPrimLong);
4155  locations->SetInAt(0, Location::RequiresRegister());
4156  locations->SetInAt(1, Location::RequiresRegister());
4157  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4158}
4159
// And, Or and Xor share a single code generator: HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  HandleBitwiseOperation(instruction);
}
4171
4172void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4173  LocationSummary* locations = instruction->GetLocations();
4174
4175  if (instruction->GetResultType() == Primitive::kPrimInt) {
4176    Register first = locations->InAt(0).AsRegister<Register>();
4177    Register second = locations->InAt(1).AsRegister<Register>();
4178    Register out = locations->Out().AsRegister<Register>();
4179    if (instruction->IsAnd()) {
4180      __ and_(out, first, ShifterOperand(second));
4181    } else if (instruction->IsOr()) {
4182      __ orr(out, first, ShifterOperand(second));
4183    } else {
4184      DCHECK(instruction->IsXor());
4185      __ eor(out, first, ShifterOperand(second));
4186    }
4187  } else {
4188    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
4189    Location first = locations->InAt(0);
4190    Location second = locations->InAt(1);
4191    Location out = locations->Out();
4192    if (instruction->IsAnd()) {
4193      __ and_(out.AsRegisterPairLow<Register>(),
4194              first.AsRegisterPairLow<Register>(),
4195              ShifterOperand(second.AsRegisterPairLow<Register>()));
4196      __ and_(out.AsRegisterPairHigh<Register>(),
4197              first.AsRegisterPairHigh<Register>(),
4198              ShifterOperand(second.AsRegisterPairHigh<Register>()));
4199    } else if (instruction->IsOr()) {
4200      __ orr(out.AsRegisterPairLow<Register>(),
4201             first.AsRegisterPairLow<Register>(),
4202             ShifterOperand(second.AsRegisterPairLow<Register>()));
4203      __ orr(out.AsRegisterPairHigh<Register>(),
4204             first.AsRegisterPairHigh<Register>(),
4205             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4206    } else {
4207      DCHECK(instruction->IsXor());
4208      __ eor(out.AsRegisterPairLow<Register>(),
4209             first.AsRegisterPairLow<Register>(),
4210             ShifterOperand(second.AsRegisterPairLow<Register>()));
4211      __ eor(out.AsRegisterPairHigh<Register>(),
4212             first.AsRegisterPairHigh<Register>(),
4213             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4214    }
4215  }
4216}
4217
// Emits the call sequence for a static or direct invoke. `temp` is a
// scratch location used to hold the callee's ArtMethod* (or, for string
// init, the entrypoint loaded from the current thread).
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    Register reg = temp.AsRegister<Register>();
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, reg, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // Call LR.
    __ blx(LR);
  } else if (invoke->IsRecursive()) {
    // Direct recursive call: branch straight to this method's frame entry.
    __ bl(GetFrameEntryLabel());
  } else {
    Register current_method =
        invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<Register>();
    Register reg = temp.AsRegister<Register>();
    // reg = current_method->dex_cache_resolved_methods_;
    __ LoadFromOffset(
        kLoadWord, reg, current_method, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
    // reg = reg[index_in_cache]
    __ LoadFromOffset(
        kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
    // LR = reg[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        kArmWordSize).Int32Value());
    // Call LR.
    __ blx(LR);
  }

  DCHECK(!IsLeafMethod());
}
4257
// HBoundType should never reach code generation.
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4263
// HBoundType should never reach code generation.
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4269
4270}  // namespace arm
4271}  // namespace art
4272