code_generator_arm.cc revision 80afd02024d20e60b197d3adfbb43cc303cf29e0
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "code_generator_utils.h"
21#include "entrypoints/quick/quick_entrypoints.h"
22#include "gc/accounting/card_table.h"
23#include "intrinsics.h"
24#include "intrinsics_arm.h"
25#include "mirror/array-inl.h"
26#include "mirror/art_method.h"
27#include "mirror/class.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// Stack offset at which the current method is spilled (see the final
// StoreToOffset in GenerateFrameEntry).
static constexpr int kCurrentMethodStackOffset = 0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Core callee-saves. PC stands in for the return address to mimic Quick's
// frame layout; LR is pushed in its place at entry (see GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
// VFP callee-saved single-precision registers.
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Within the slow paths below, `codegen` is the CodeGenerator* parameter of
// EmitNativeCode.
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
59
// Slow path for HNullCheck: calls the pThrowNullPointer runtime entry point.
class NullCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // The runtime call throws; no registers are saved and no branch back to
    // the exit label is emitted.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
75
// Slow path for HDivZeroCheck: calls the pThrowDivZero runtime entry point.
class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // The runtime call throws; no registers are saved and no branch back to
    // the exit label is emitted.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
91
// Slow path for HSuspendCheck: calls pTestSuspend so the runtime can suspend
// the current thread. Unlike the throwing slow paths it resumes execution,
// so live registers are saved and restored around the runtime call.
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    // Resume either just after the check (return label) or directly at the
    // successor block.
    if (successor_ == nullptr) {
      __ b(GetReturnLabel());
    } else {
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
130
// Slow path for HBoundsCheck: passes the offending index and the array length
// to the pThrowArrayBounds runtime entry point.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
                         Location index_location,
                         Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        length_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    // The runtime call throws; no branch back to the exit label is emitted.
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
164
// Slow path resolving (and optionally initializing) a class through the
// runtime, used by HLoadClass and HClinitCheck.
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // The runtime takes the type index as its only argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    // pInitializeStaticStorage also runs <clinit>; pInitializeType only
    // resolves the type.
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
215
// Slow path for HLoadString: resolves the string through the runtime and
// moves the result (returned in R0) into the instruction's output location.
class LoadStringSlowPathARM : public SlowPathCodeARM {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // The output register must not be clobbered by the register restore below.
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // The runtime takes the string index as its only argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
243
244class TypeCheckSlowPathARM : public SlowPathCodeARM {
245 public:
246  TypeCheckSlowPathARM(HInstruction* instruction,
247                       Location class_to_check,
248                       Location object_class,
249                       uint32_t dex_pc)
250      : instruction_(instruction),
251        class_to_check_(class_to_check),
252        object_class_(object_class),
253        dex_pc_(dex_pc) {}
254
255  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
256    LocationSummary* locations = instruction_->GetLocations();
257    DCHECK(instruction_->IsCheckCast()
258           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
259
260    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
261    __ Bind(GetEntryLabel());
262    SaveLiveRegisters(codegen, locations);
263
264    // We're moving two locations to locations that could overlap, so we need a parallel
265    // move resolver.
266    InvokeRuntimeCallingConvention calling_convention;
267    codegen->EmitParallelMoves(
268        class_to_check_,
269        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
270        Primitive::kPrimNot,
271        object_class_,
272        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
273        Primitive::kPrimNot);
274
275    if (instruction_->IsInstanceOf()) {
276      arm_codegen->InvokeRuntime(
277          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
278      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
279    } else {
280      DCHECK(instruction_->IsCheckCast());
281      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
282    }
283
284    RestoreLiveRegisters(codegen, locations);
285    __ b(GetExitLabel());
286  }
287
288 private:
289  HInstruction* const instruction_;
290  const Location class_to_check_;
291  const Location object_class_;
292  uint32_t dex_pc_;
293
294  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
295};
296
// Slow path for HDeoptimize: hands control back to the interpreter through
// the pDeoptimize runtime entry point. Note there is no branch back to an
// exit label: execution does not resume in this compiled code.
class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
316
317#undef __
318
319#undef __
320#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
321
322inline Condition ARMCondition(IfCondition cond) {
323  switch (cond) {
324    case kCondEQ: return EQ;
325    case kCondNE: return NE;
326    case kCondLT: return LT;
327    case kCondLE: return LE;
328    case kCondGT: return GT;
329    case kCondGE: return GE;
330    default:
331      LOG(FATAL) << "Unknown if condition";
332  }
333  return EQ;        // Unreachable.
334}
335
336inline Condition ARMOppositeCondition(IfCondition cond) {
337  switch (cond) {
338    case kCondEQ: return NE;
339    case kCondNE: return EQ;
340    case kCondLT: return GE;
341    case kCondLE: return GT;
342    case kCondGT: return LE;
343    case kCondGE: return LT;
344    default:
345      LOG(FATAL) << "Unknown if condition";
346  }
347  return EQ;        // Unreachable.
348}
349
350void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
351  stream << Register(reg);
352}
353
354void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
355  stream << SRegister(reg);
356}
357
358size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
359  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
360  return kArmWordSize;
361}
362
363size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
364  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
365  return kArmWordSize;
366}
367
368size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
369  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
370  return kArmWordSize;
371}
372
373size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
374  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
375  return kArmWordSize;
376}
377
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    // The callee-save enum arrays are handed to the base class
                    // as plain int arrays to build the save masks.
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(false /* can_relocate_branches */),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
399
// Hands out a currently-free register (or register pair) suitable for `type`,
// updating the blocked-register tables so it cannot be handed out again.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      // Block both halves, then refresh the pair table so no other pair that
      // overlaps these registers can be handed out.
      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index
      // (see ExpectedPairLayout).
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
450
451void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
452  // Don't allocate the dalvik style register pair passing.
453  blocked_register_pairs_[R1_R2] = true;
454
455  // Stack register, LR and PC are always reserved.
456  blocked_core_registers_[SP] = true;
457  blocked_core_registers_[LR] = true;
458  blocked_core_registers_[PC] = true;
459
460  // Reserve thread register.
461  blocked_core_registers_[TR] = true;
462
463  // Reserve temp register.
464  blocked_core_registers_[IP] = true;
465
466  if (is_baseline) {
467    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
468      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
469    }
470
471    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
472
473    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
474      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
475    }
476  }
477
478  UpdateBlockedPairRegisters();
479}
480
481void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
482  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
483    ArmManagedRegister current =
484        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
485    if (blocked_core_registers_[current.AsRegisterPairLow()]
486        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
487      blocked_register_pairs_[i] = true;
488    }
489  }
490}
491
// Visitor emitting native code for each HInstruction; shares the assembler
// owned by `codegen`.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
496
// Computes `core_spill_mask_` / `fpu_spill_mask_` from the callee-save
// registers the allocator actually used.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest set bits so the mask is
    // contiguous, as vpush/vpop require.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
517
// Maps an ARM core register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

// Maps a VFP single-precision register to its DWARF register number.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
525
// Emits the method prologue: stack overflow probe, callee-save pushes (with
// matching CFI), frame allocation, and spilling of the current method (R0)
// at SP + 0 (kCurrentMethodStackOffset).
void CodeGeneratorARM::GenerateFrameEntry() {
  // Leaf methods with small frames cannot grow the stack past the implicit
  // check margin, so the explicit probe can be skipped for them.
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Probe `SP - reserved bytes`: an overflowing stack faults here, and the
    // recorded pc lets the runtime attribute the fault to this method.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), so one vpush
    // starting at its lowest register covers it all.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame, then store the current method at SP + 0.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, R0, SP, 0);
}
559
// Emits the method epilogue, mirroring GenerateFrameEntry: frame
// deallocation, callee-save pops with matching CFI, and the return.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    // Nothing was pushed; just return.
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    // The mask is contiguous (see ComputeSpillMask): one vpop restores all.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // core_spill_mask_ includes PC (pushed as LR at entry), so this pop also
  // performs the return.
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
579
// Binds the block's label at the current assembler position so branches to
// `block` can be resolved.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
583
// Returns the stack slot holding the local read by `load`: a double slot for
// 64-bit types, a single slot for 32-bit ones. Sub-word and void types are
// rejected here.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}
607
// Assigns the location of the next method parameter of the given type:
// a core register (pair), an fpu register (pair), or a stack slot once the
// convention's registers run out. Every argument also consumes stack-index
// space, so register arguments keep a reserved home slot.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs take two consecutive core registers or a double stack slot.
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The two registers must be consecutive to form a valid pair.
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // When the float index is even there is no half-used pair left behind
      // by double alignment to backfill, so catch up with the double index.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at the next even fpu index at or past the floats
      // allocated so far.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
682
683Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) {
684  switch (type) {
685    case Primitive::kPrimBoolean:
686    case Primitive::kPrimByte:
687    case Primitive::kPrimChar:
688    case Primitive::kPrimShort:
689    case Primitive::kPrimInt:
690    case Primitive::kPrimNot: {
691      return Location::RegisterLocation(R0);
692    }
693
694    case Primitive::kPrimFloat: {
695      return Location::FpuRegisterLocation(S0);
696    }
697
698    case Primitive::kPrimLong: {
699      return Location::RegisterPairLocation(R0, R1);
700    }
701
702    case Primitive::kPrimDouble: {
703      return Location::FpuRegisterPairLocation(S0, S1);
704    }
705
706    case Primitive::kPrimVoid:
707      return Location();
708  }
709  UNREACHABLE();
710}
711
// Emits a 32-bit move between any combination of core register, S register
// and stack slot. IP is used as scratch for stack-to-stack moves.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      // Stack-to-stack: bounce through IP.
      DCHECK(source.IsStackSlot()) << source;
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
745
// Emits a 64-bit move between core register pairs, fpu register pairs and
// double stack slots. Unsupported combinations are UNIMPLEMENTED.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Pair-to-pair halves may overlap, so let the parallel move resolver
      // order the two word moves.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // A word-pair load requires the destination pair layout to be valid.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1_R2 does not satisfy the pair layout needed for a word-pair
        // store (see ExpectedPairLayout), so store each half separately.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack: move the two words via the parallel move resolver.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
803
// Moves the value produced by `instruction` into `location` for use by
// `move_for`.  Handles constants (materialized at the use site), baseline
// locals/temporaries spilled to stack slots, and plain register/stack
// outputs.  Does nothing when the value already lives in `location`.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations != nullptr && locations->Out().Equals(location)) {
    // The value is already where the consumer expects it.
    return;
  }

  if (locations != nullptr && locations->Out().IsConstant()) {
    // Constants are not kept in registers; generate them at the use site.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Materialize in the scratch register IP, then spill.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store the 64-bit constant one 32-bit half at a time via IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Baseline: locals live in stack slots; copy from the local's slot.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    // Temporaries are also stack-based; pick 32- vs 64-bit move by slot kind.
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: the instruction's output location was just computed;
    // it must be consumed immediately (or by a trailing temporary).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
887
// Calls the quick runtime entry point at `entry_point_offset` (an offset
// from the thread register TR) and records the PC so a stack map exists at
// the call site for `instruction` at `dex_pc`.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  // Load the entry point out of the Thread object and branch-and-link.
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Sanity check: only instructions allowed to call the runtime (implicit
  // checks, suspend checks, or instructions whose locations say they can
  // call) may reach here in a method still considered a leaf.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
902
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  // An unconditional branch has no operands.
  got->SetLocations(nullptr);
}

// Emits control flow for an HGoto, folding suspend checks into back edges
// and eliding jumps to the fallthrough block.
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Loop back edge with a pending suspend check: generate the suspend
    // check here; that path is responsible for reaching `successor`.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    // Method-entry suspend check; no particular successor to resume at.
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    // Only emit a jump if the successor is not the next block in order.
    __ b(codegen_->GetLabelOf(successor));
  }
}
928
void LocationsBuilderARM::VisitExit(HExit* exit) {
  // The exit instruction needs no operands.
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  // No code is generated for the exit block.
  UNUSED(exit);
}
936
// Emits the compare and conditional branch for `instruction` (HIf or
// HDeoptimize): jumps to `true_target` when the condition holds, then to
// `false_target` (if non-null) otherwise.  `always_true_target` is the
// label used when the condition is the constant 1; it is null when the
// true block is the fallthrough.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
      // Constant false: fall through to the `false_target` jump below.
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          // The constant fits in an immediate shifter operand of CMP.
          __ cmp(left, operand);
        } else {
          // Otherwise materialize the constant in the scratch register IP.
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
988
989void LocationsBuilderARM::VisitIf(HIf* if_instr) {
990  LocationSummary* locations =
991      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
992  HInstruction* cond = if_instr->InputAt(0);
993  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
994    locations->SetInAt(0, Location::RequiresRegister());
995  }
996}
997
998void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
999  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1000  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1001  Label* always_true_target = true_target;
1002  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1003                                if_instr->IfTrueSuccessor())) {
1004    always_true_target = nullptr;
1005  }
1006  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1007                                if_instr->IfFalseSuccessor())) {
1008    false_target = nullptr;
1009  }
1010  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1011}
1012
1013void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1014  LocationSummary* locations = new (GetGraph()->GetArena())
1015      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1016  HInstruction* cond = deoptimize->InputAt(0);
1017  DCHECK(cond->IsCondition());
1018  if (cond->AsCondition()->NeedsMaterialization()) {
1019    locations->SetInAt(0, Location::RequiresRegister());
1020  }
1021}
1022
1023void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1024  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1025      DeoptimizationSlowPathARM(deoptimize);
1026  codegen_->AddSlowPath(slow_path);
1027  Label* slow_path_entry = slow_path->GetEntryLabel();
1028  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1029}
1030
void LocationsBuilderARM::VisitCondition(HCondition* comp) {
  // Left operand in a register, right operand a register or constant.  An
  // output register is only needed when the boolean is materialized.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)));
  if (comp->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}
1040
// Generates a materialized comparison: sets the flags with CMP, then uses
// an IT block to move 1 (condition true) or 0 (condition false) into the
// output register.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
  if (!comp->NeedsMaterialization()) return;
  LocationSummary* locations = comp->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      // The constant can be encoded directly as a CMP shifter operand.
      __ cmp(left, operand);
    } else {
      // Otherwise load it into the scratch register IP first.
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // IT <cond> ELSE; MOV<cond> out, #1; MOV<!cond> out, #0.
  __ it(ARMCondition(comp->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(comp->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(comp->GetCondition()));
}
1066
// All concrete comparison visitors share HCondition's location constraints
// and code generation; each simply delegates to VisitCondition.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1114
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  // A local is just a named stack slot; no operand constraints.
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // No code: locals only appear in the entry block.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1131
1132void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1133  LocationSummary* locations =
1134      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1135  switch (store->InputAt(1)->GetType()) {
1136    case Primitive::kPrimBoolean:
1137    case Primitive::kPrimByte:
1138    case Primitive::kPrimChar:
1139    case Primitive::kPrimShort:
1140    case Primitive::kPrimInt:
1141    case Primitive::kPrimNot:
1142    case Primitive::kPrimFloat:
1143      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1144      break;
1145
1146    case Primitive::kPrimLong:
1147    case Primitive::kPrimDouble:
1148      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1149      break;
1150
1151    default:
1152      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1153  }
1154}
1155
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  // Nothing to emit: the input was constrained to the local's stack slot,
  // so the move into the slot has already been generated.
  UNUSED(store);
}
1159
// Constants produce no code of their own: each is flagged as a constant
// location and materialized at the use site (see CodeGeneratorARM::Move).
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1214
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // A memory barrier has no operands.
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Emit the barrier instruction matching the requested kind.
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1222
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  // Returning void needs no operands.
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  // Constrain the returned value to the calling convention's return
  // location for its type.
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}

void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  // The value already sits in the return location; just tear down the frame.
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1242
1243void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1244  // When we do not run baseline, explicit clinit checks triggered by static
1245  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1246  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1247
1248  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1249                                         codegen_->GetInstructionSetFeatures());
1250  if (intrinsic.TryDispatch(invoke)) {
1251    return;
1252  }
1253
1254  HandleInvoke(invoke);
1255}
1256
// Loads the current method (spilled at SP + kCurrentMethodStackOffset)
// into `reg`.
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
1261
1262static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1263  if (invoke->GetLocations()->Intrinsified()) {
1264    IntrinsicCodeGeneratorARM intrinsic(codegen);
1265    intrinsic.Dispatch(invoke);
1266    return true;
1267  }
1268  return false;
1269}
1270
1271void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1272  // When we do not run baseline, explicit clinit checks triggered by static
1273  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1274  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1275
1276  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1277    return;
1278  }
1279
1280  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
1281
1282  codegen_->GenerateStaticOrDirectCall(invoke, temp);
1283  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1284}
1285
1286void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
1287  LocationSummary* locations =
1288      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
1289  locations->AddTemp(Location::RegisterLocation(R0));
1290
1291  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
1292  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
1293    HInstruction* input = invoke->InputAt(i);
1294    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
1295  }
1296
1297  locations->SetOut(calling_convention_visitor.GetReturnLocation(invoke->GetType()));
1298}
1299
1300void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1301  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1302                                         codegen_->GetInstructionSetFeatures());
1303  if (intrinsic.TryDispatch(invoke)) {
1304    return;
1305  }
1306
1307  HandleInvoke(invoke);
1308}
1309
// Generates a virtual call: loads the receiver's class, indexes its
// embedded vtable, and branch-and-links through the method's entry point.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  // Intrinsified invokes expand inline instead of performing a call.
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
          invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above may double as an implicit null check.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1340
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  // R12 carries the invoked method's dex method index (loaded in the code
  // generator below).
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1346
// Generates an interface call: loads the receiver's class, indexes its
// embedded IMT, and branch-and-links through the entry point, passing the
// dex method index as a hidden argument in R12.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above may double as an implicit null check.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1379
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // Output overlaps the input: the generated long-neg sequence writes
      // the low/high output registers before it has read the high input.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1405
// Generates arithmetic negation.  Integer negation is computed as 0 - in;
// long negation uses an RSBS/SBC/SUB sequence (see comments below) because
// Thumb-2 lacks RSC; floats use the dedicated VFP negate instructions.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1452
1453void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1454  Primitive::Type result_type = conversion->GetResultType();
1455  Primitive::Type input_type = conversion->GetInputType();
1456  DCHECK_NE(result_type, input_type);
1457
1458  // The float-to-long and double-to-long type conversions rely on a
1459  // call to the runtime.
1460  LocationSummary::CallKind call_kind =
1461      ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1462       && result_type == Primitive::kPrimLong)
1463      ? LocationSummary::kCall
1464      : LocationSummary::kNoCall;
1465  LocationSummary* locations =
1466      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1467
1468  // The Java language does not allow treating boolean as an integral type but
1469  // our bit representation makes it safe.
1470
1471  switch (result_type) {
1472    case Primitive::kPrimByte:
1473      switch (input_type) {
1474        case Primitive::kPrimBoolean:
1475          // Boolean input is a result of code transformations.
1476        case Primitive::kPrimShort:
1477        case Primitive::kPrimInt:
1478        case Primitive::kPrimChar:
1479          // Processing a Dex `int-to-byte' instruction.
1480          locations->SetInAt(0, Location::RequiresRegister());
1481          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1482          break;
1483
1484        default:
1485          LOG(FATAL) << "Unexpected type conversion from " << input_type
1486                     << " to " << result_type;
1487      }
1488      break;
1489
1490    case Primitive::kPrimShort:
1491      switch (input_type) {
1492        case Primitive::kPrimBoolean:
1493          // Boolean input is a result of code transformations.
1494        case Primitive::kPrimByte:
1495        case Primitive::kPrimInt:
1496        case Primitive::kPrimChar:
1497          // Processing a Dex `int-to-short' instruction.
1498          locations->SetInAt(0, Location::RequiresRegister());
1499          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1500          break;
1501
1502        default:
1503          LOG(FATAL) << "Unexpected type conversion from " << input_type
1504                     << " to " << result_type;
1505      }
1506      break;
1507
1508    case Primitive::kPrimInt:
1509      switch (input_type) {
1510        case Primitive::kPrimLong:
1511          // Processing a Dex `long-to-int' instruction.
1512          locations->SetInAt(0, Location::Any());
1513          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1514          break;
1515
1516        case Primitive::kPrimFloat:
1517          // Processing a Dex `float-to-int' instruction.
1518          locations->SetInAt(0, Location::RequiresFpuRegister());
1519          locations->SetOut(Location::RequiresRegister());
1520          locations->AddTemp(Location::RequiresFpuRegister());
1521          break;
1522
1523        case Primitive::kPrimDouble:
1524          // Processing a Dex `double-to-int' instruction.
1525          locations->SetInAt(0, Location::RequiresFpuRegister());
1526          locations->SetOut(Location::RequiresRegister());
1527          locations->AddTemp(Location::RequiresFpuRegister());
1528          break;
1529
1530        default:
1531          LOG(FATAL) << "Unexpected type conversion from " << input_type
1532                     << " to " << result_type;
1533      }
1534      break;
1535
1536    case Primitive::kPrimLong:
1537      switch (input_type) {
1538        case Primitive::kPrimBoolean:
1539          // Boolean input is a result of code transformations.
1540        case Primitive::kPrimByte:
1541        case Primitive::kPrimShort:
1542        case Primitive::kPrimInt:
1543        case Primitive::kPrimChar:
1544          // Processing a Dex `int-to-long' instruction.
1545          locations->SetInAt(0, Location::RequiresRegister());
1546          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1547          break;
1548
1549        case Primitive::kPrimFloat: {
1550          // Processing a Dex `float-to-long' instruction.
1551          InvokeRuntimeCallingConvention calling_convention;
1552          locations->SetInAt(0, Location::FpuRegisterLocation(
1553              calling_convention.GetFpuRegisterAt(0)));
1554          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1555          break;
1556        }
1557
1558        case Primitive::kPrimDouble: {
1559          // Processing a Dex `double-to-long' instruction.
1560          InvokeRuntimeCallingConvention calling_convention;
1561          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1562              calling_convention.GetFpuRegisterAt(0),
1563              calling_convention.GetFpuRegisterAt(1)));
1564          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1565          break;
1566        }
1567
1568        default:
1569          LOG(FATAL) << "Unexpected type conversion from " << input_type
1570                     << " to " << result_type;
1571      }
1572      break;
1573
1574    case Primitive::kPrimChar:
1575      switch (input_type) {
1576        case Primitive::kPrimBoolean:
1577          // Boolean input is a result of code transformations.
1578        case Primitive::kPrimByte:
1579        case Primitive::kPrimShort:
1580        case Primitive::kPrimInt:
1581          // Processing a Dex `int-to-char' instruction.
1582          locations->SetInAt(0, Location::RequiresRegister());
1583          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1584          break;
1585
1586        default:
1587          LOG(FATAL) << "Unexpected type conversion from " << input_type
1588                     << " to " << result_type;
1589      }
1590      break;
1591
1592    case Primitive::kPrimFloat:
1593      switch (input_type) {
1594        case Primitive::kPrimBoolean:
1595          // Boolean input is a result of code transformations.
1596        case Primitive::kPrimByte:
1597        case Primitive::kPrimShort:
1598        case Primitive::kPrimInt:
1599        case Primitive::kPrimChar:
1600          // Processing a Dex `int-to-float' instruction.
1601          locations->SetInAt(0, Location::RequiresRegister());
1602          locations->SetOut(Location::RequiresFpuRegister());
1603          break;
1604
1605        case Primitive::kPrimLong:
1606          // Processing a Dex `long-to-float' instruction.
1607          locations->SetInAt(0, Location::RequiresRegister());
1608          locations->SetOut(Location::RequiresFpuRegister());
1609          locations->AddTemp(Location::RequiresRegister());
1610          locations->AddTemp(Location::RequiresRegister());
1611          locations->AddTemp(Location::RequiresFpuRegister());
1612          locations->AddTemp(Location::RequiresFpuRegister());
1613          break;
1614
1615        case Primitive::kPrimDouble:
1616          // Processing a Dex `double-to-float' instruction.
1617          locations->SetInAt(0, Location::RequiresFpuRegister());
1618          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1619          break;
1620
1621        default:
1622          LOG(FATAL) << "Unexpected type conversion from " << input_type
1623                     << " to " << result_type;
1624      };
1625      break;
1626
1627    case Primitive::kPrimDouble:
1628      switch (input_type) {
1629        case Primitive::kPrimBoolean:
1630          // Boolean input is a result of code transformations.
1631        case Primitive::kPrimByte:
1632        case Primitive::kPrimShort:
1633        case Primitive::kPrimInt:
1634        case Primitive::kPrimChar:
1635          // Processing a Dex `int-to-double' instruction.
1636          locations->SetInAt(0, Location::RequiresRegister());
1637          locations->SetOut(Location::RequiresFpuRegister());
1638          break;
1639
1640        case Primitive::kPrimLong:
1641          // Processing a Dex `long-to-double' instruction.
1642          locations->SetInAt(0, Location::RequiresRegister());
1643          locations->SetOut(Location::RequiresFpuRegister());
1644          locations->AddTemp(Location::RequiresRegister());
1645          locations->AddTemp(Location::RequiresRegister());
1646          locations->AddTemp(Location::RequiresFpuRegister());
1647          break;
1648
1649        case Primitive::kPrimFloat:
1650          // Processing a Dex `float-to-double' instruction.
1651          locations->SetInAt(0, Location::RequiresFpuRegister());
1652          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1653          break;
1654
1655        default:
1656          LOG(FATAL) << "Unexpected type conversion from " << input_type
1657                     << " to " << result_type;
1658      };
1659      break;
1660
1661    default:
1662      LOG(FATAL) << "Unexpected type conversion from " << input_type
1663                 << " to " << result_type;
1664  }
1665}
1666
// Code-generation counterpart of LocationsBuilderARM::VisitTypeConversion:
// emits the ARM instruction sequence for each supported (input type, result
// type) pair. The outer switch dispatches on the result type, the inner one
// on the input type; any unexpected pair aborts compilation.
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  // A same-type conversion is a no-op and should have been eliminated earlier.
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Signed bitfield extract: sign-extend the low 8 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Signed bitfield extract: sign-extend the low 16 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Only the low 32 bits of the long are kept; the input may live in
          // a register pair, on the stack, or be a constant.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FPU temp, then move the integer result to the core
          // destination register.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Same pattern as float-to-int, using the D view of the temp pair.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // Delegated to the quick runtime entrypoint; the locations builder
          // set up the runtime calling convention for this case.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          // Delegated to the quick runtime entrypoint, as above.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Unsigned bitfield extract: char is zero-extended from 16 bits.
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister output = out.AsFpuRegister<SRegister>();
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp1_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp1_d = FromLowSToD(temp1_s);
          SRegister temp2_s = locations->GetTemp(3).AsFpuRegisterPairLow<SRegister>();
          DRegister temp2_d = FromLowSToD(temp2_s);

          // Operations use doubles for precision reasons (each 32-bit
          // half of a long fits in the 53-bit mantissa of a double,
          // but not in the 24-bit mantissa of a float).  This is
          // especially important for the low bits.  The result is
          // eventually converted to float.

          // temp1_d = int-to-double(high)
          __ vmovsr(temp1_s, high);
          __ vcvtdi(temp1_d, temp1_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp2_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into
          // `temp2_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp2_d, constant_low, constant_high);
          // temp1_d = temp1_d * 2^32
          __ vmuld(temp1_d, temp1_d, temp2_d);
          // temp2_d = unsigned-to-double(low)
          __ vmovsr(temp2_s, low);
          __ vcvtdu(temp2_d, temp2_s);
          // temp1_d = temp1_d + temp2_d
          __ vaddd(temp1_d, temp1_d, temp2_d);
          // output = double-to-float(temp1_d);
          __ vcvtsd(output, temp1_d);
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Same split-halves technique as long-to-float above, but the
          // result stays in double precision so no final narrowing is needed.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);

          // out_d = int-to-double(high)
          __ vmovsr(out_s, high);
          __ vcvtdi(out_d, out_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into `temp_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp_d, constant_low, constant_high);
          // out_d = out_d * 2^32
          __ vmuld(out_d, out_d, temp_d);
          // temp_d = unsigned-to-double(low)
          __ vmovsr(temp_s, low);
          __ vcvtdu(temp_d, temp_s);
          // out_d = out_d + temp_d
          __ vaddd(out_d, out_d, temp_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1940
1941void LocationsBuilderARM::VisitAdd(HAdd* add) {
1942  LocationSummary* locations =
1943      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1944  switch (add->GetResultType()) {
1945    case Primitive::kPrimInt: {
1946      locations->SetInAt(0, Location::RequiresRegister());
1947      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1948      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1949      break;
1950    }
1951
1952    case Primitive::kPrimLong: {
1953      locations->SetInAt(0, Location::RequiresRegister());
1954      locations->SetInAt(1, Location::RequiresRegister());
1955      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1956      break;
1957    }
1958
1959    case Primitive::kPrimFloat:
1960    case Primitive::kPrimDouble: {
1961      locations->SetInAt(0, Location::RequiresFpuRegister());
1962      locations->SetInAt(1, Location::RequiresFpuRegister());
1963      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1964      break;
1965    }
1966
1967    default:
1968      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1969  }
1970}
1971
1972void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
1973  LocationSummary* locations = add->GetLocations();
1974  Location out = locations->Out();
1975  Location first = locations->InAt(0);
1976  Location second = locations->InAt(1);
1977  switch (add->GetResultType()) {
1978    case Primitive::kPrimInt:
1979      if (second.IsRegister()) {
1980        __ add(out.AsRegister<Register>(),
1981               first.AsRegister<Register>(),
1982               ShifterOperand(second.AsRegister<Register>()));
1983      } else {
1984        __ AddConstant(out.AsRegister<Register>(),
1985                       first.AsRegister<Register>(),
1986                       second.GetConstant()->AsIntConstant()->GetValue());
1987      }
1988      break;
1989
1990    case Primitive::kPrimLong: {
1991      DCHECK(second.IsRegisterPair());
1992      __ adds(out.AsRegisterPairLow<Register>(),
1993              first.AsRegisterPairLow<Register>(),
1994              ShifterOperand(second.AsRegisterPairLow<Register>()));
1995      __ adc(out.AsRegisterPairHigh<Register>(),
1996             first.AsRegisterPairHigh<Register>(),
1997             ShifterOperand(second.AsRegisterPairHigh<Register>()));
1998      break;
1999    }
2000
2001    case Primitive::kPrimFloat:
2002      __ vadds(out.AsFpuRegister<SRegister>(),
2003               first.AsFpuRegister<SRegister>(),
2004               second.AsFpuRegister<SRegister>());
2005      break;
2006
2007    case Primitive::kPrimDouble:
2008      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2009               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2010               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2011      break;
2012
2013    default:
2014      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2015  }
2016}
2017
2018void LocationsBuilderARM::VisitSub(HSub* sub) {
2019  LocationSummary* locations =
2020      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2021  switch (sub->GetResultType()) {
2022    case Primitive::kPrimInt: {
2023      locations->SetInAt(0, Location::RequiresRegister());
2024      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2025      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2026      break;
2027    }
2028
2029    case Primitive::kPrimLong: {
2030      locations->SetInAt(0, Location::RequiresRegister());
2031      locations->SetInAt(1, Location::RequiresRegister());
2032      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2033      break;
2034    }
2035    case Primitive::kPrimFloat:
2036    case Primitive::kPrimDouble: {
2037      locations->SetInAt(0, Location::RequiresFpuRegister());
2038      locations->SetInAt(1, Location::RequiresFpuRegister());
2039      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2040      break;
2041    }
2042    default:
2043      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2044  }
2045}
2046
2047void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2048  LocationSummary* locations = sub->GetLocations();
2049  Location out = locations->Out();
2050  Location first = locations->InAt(0);
2051  Location second = locations->InAt(1);
2052  switch (sub->GetResultType()) {
2053    case Primitive::kPrimInt: {
2054      if (second.IsRegister()) {
2055        __ sub(out.AsRegister<Register>(),
2056               first.AsRegister<Register>(),
2057               ShifterOperand(second.AsRegister<Register>()));
2058      } else {
2059        __ AddConstant(out.AsRegister<Register>(),
2060                       first.AsRegister<Register>(),
2061                       -second.GetConstant()->AsIntConstant()->GetValue());
2062      }
2063      break;
2064    }
2065
2066    case Primitive::kPrimLong: {
2067      DCHECK(second.IsRegisterPair());
2068      __ subs(out.AsRegisterPairLow<Register>(),
2069              first.AsRegisterPairLow<Register>(),
2070              ShifterOperand(second.AsRegisterPairLow<Register>()));
2071      __ sbc(out.AsRegisterPairHigh<Register>(),
2072             first.AsRegisterPairHigh<Register>(),
2073             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2074      break;
2075    }
2076
2077    case Primitive::kPrimFloat: {
2078      __ vsubs(out.AsFpuRegister<SRegister>(),
2079               first.AsFpuRegister<SRegister>(),
2080               second.AsFpuRegister<SRegister>());
2081      break;
2082    }
2083
2084    case Primitive::kPrimDouble: {
2085      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2086               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2087               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2088      break;
2089    }
2090
2091
2092    default:
2093      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2094  }
2095}
2096
2097void LocationsBuilderARM::VisitMul(HMul* mul) {
2098  LocationSummary* locations =
2099      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2100  switch (mul->GetResultType()) {
2101    case Primitive::kPrimInt:
2102    case Primitive::kPrimLong:  {
2103      locations->SetInAt(0, Location::RequiresRegister());
2104      locations->SetInAt(1, Location::RequiresRegister());
2105      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2106      break;
2107    }
2108
2109    case Primitive::kPrimFloat:
2110    case Primitive::kPrimDouble: {
2111      locations->SetInAt(0, Location::RequiresFpuRegister());
2112      locations->SetInAt(1, Location::RequiresFpuRegister());
2113      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2114      break;
2115    }
2116
2117    default:
2118      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2119  }
2120}
2121
// Emits the ARM instruction sequence for a multiplication. The 64-bit case
// synthesizes a long multiply from 32x32 products, clobbering the scratch
// register IP; its exact instruction order matters (see the DCHECKs below).
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2183
2184void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2185  DCHECK(instruction->IsDiv() || instruction->IsRem());
2186  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2187
2188  LocationSummary* locations = instruction->GetLocations();
2189  Location second = locations->InAt(1);
2190  DCHECK(second.IsConstant());
2191
2192  Register out = locations->Out().AsRegister<Register>();
2193  Register dividend = locations->InAt(0).AsRegister<Register>();
2194  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2195  DCHECK(imm == 1 || imm == -1);
2196
2197  if (instruction->IsRem()) {
2198    __ LoadImmediate(out, 0);
2199  } else {
2200    if (imm == 1) {
2201      __ Mov(out, dividend);
2202    } else {
2203      __ rsb(out, dividend, ShifterOperand(0));
2204    }
2205  }
2206}
2207
2208void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2209  DCHECK(instruction->IsDiv() || instruction->IsRem());
2210  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2211
2212  LocationSummary* locations = instruction->GetLocations();
2213  Location second = locations->InAt(1);
2214  DCHECK(second.IsConstant());
2215
2216  Register out = locations->Out().AsRegister<Register>();
2217  Register dividend = locations->InAt(0).AsRegister<Register>();
2218  Register temp = locations->GetTemp(0).AsRegister<Register>();
2219  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2220  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
2221  DCHECK(IsPowerOfTwo(abs_imm));
2222  int ctz_imm = CTZ(abs_imm);
2223
2224  if (ctz_imm == 1) {
2225    __ Lsr(temp, dividend, 32 - ctz_imm);
2226  } else {
2227    __ Asr(temp, dividend, 31);
2228    __ Lsr(temp, temp, 32 - ctz_imm);
2229  }
2230  __ add(out, temp, ShifterOperand(dividend));
2231
2232  if (instruction->IsDiv()) {
2233    __ Asr(out, out, ctz_imm);
2234    if (imm < 0) {
2235      __ rsb(out, out, ShifterOperand(0));
2236    }
2237  } else {
2238    __ ubfx(out, out, 0, ctz_imm);
2239    __ sub(out, out, ShifterOperand(temp));
2240  }
2241}
2242
// Generates an int div/rem by an arbitrary constant (neither 0, +/-1, nor a
// power of two) using magic-number multiplication (cf. Hacker's Delight,
// chapter 10): the quotient is derived from the high 32 bits of
// dividend * magic, adjusted by sign and shifted.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  // Two core temps were reserved by the locations builder for this path.
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 = high word of dividend * magic (temp2 receives the unused low
  // word). The magic constant is expected to fit in 32 bits for the int case
  // (is_long == false), so the LoadImmediate truncation is benign.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high word when the sign of the magic constant differs from
  // the sign of the divisor.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // Quotient = temp1 + (temp1 >> 31): add one when the estimate is negative.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    // Remainder = dividend - quotient * imm.
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2283
2284void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2285  DCHECK(instruction->IsDiv() || instruction->IsRem());
2286  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2287
2288  LocationSummary* locations = instruction->GetLocations();
2289  Location second = locations->InAt(1);
2290  DCHECK(second.IsConstant());
2291
2292  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2293  if (imm == 0) {
2294    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2295  } else if (imm == 1 || imm == -1) {
2296    DivRemOneOrMinusOne(instruction);
2297  } else if (IsPowerOfTwo(std::abs(imm))) {
2298    DivRemByPowerOfTwo(instruction);
2299  } else {
2300    DCHECK(imm <= -2 || imm >= 2);
2301    GenerateDivRemWithAnyConstant(instruction);
2302  }
2303}
2304
2305void LocationsBuilderARM::VisitDiv(HDiv* div) {
2306  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2307  if (div->GetResultType() == Primitive::kPrimLong) {
2308    // pLdiv runtime call.
2309    call_kind = LocationSummary::kCall;
2310  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2311    // sdiv will be replaced by other instruction sequence.
2312  } else if (div->GetResultType() == Primitive::kPrimInt &&
2313             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2314    // pIdivmod runtime call.
2315    call_kind = LocationSummary::kCall;
2316  }
2317
2318  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2319
2320  switch (div->GetResultType()) {
2321    case Primitive::kPrimInt: {
2322      if (div->InputAt(1)->IsConstant()) {
2323        locations->SetInAt(0, Location::RequiresRegister());
2324        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2325        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2326        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2327        if (abs_imm <= 1) {
2328          // No temp register required.
2329        } else {
2330          locations->AddTemp(Location::RequiresRegister());
2331          if (!IsPowerOfTwo(abs_imm)) {
2332            locations->AddTemp(Location::RequiresRegister());
2333          }
2334        }
2335      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2336        locations->SetInAt(0, Location::RequiresRegister());
2337        locations->SetInAt(1, Location::RequiresRegister());
2338        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2339      } else {
2340        InvokeRuntimeCallingConvention calling_convention;
2341        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2342        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2343        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2344        //       we only need the former.
2345        locations->SetOut(Location::RegisterLocation(R0));
2346      }
2347      break;
2348    }
2349    case Primitive::kPrimLong: {
2350      InvokeRuntimeCallingConvention calling_convention;
2351      locations->SetInAt(0, Location::RegisterPairLocation(
2352          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2353      locations->SetInAt(1, Location::RegisterPairLocation(
2354          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2355      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2356      break;
2357    }
2358    case Primitive::kPrimFloat:
2359    case Primitive::kPrimDouble: {
2360      locations->SetInAt(0, Location::RequiresFpuRegister());
2361      locations->SetInAt(1, Location::RequiresFpuRegister());
2362      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2363      break;
2364    }
2365
2366    default:
2367      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2368  }
2369}
2370
// Emits code for an int, long, float or double division.  Integer division by
// a constant and the hardware-sdiv case are expanded inline; the remaining
// cases call into the quick runtime.  Register constraints were fixed by the
// matching LocationsBuilderARM::VisitDiv.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        // Constant divisor: emit a specialized instruction sequence instead
        // of a divide.
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Hardware divide available: a single sdiv suffices.
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // No hardware divide: call the pIdivmod entrypoint.  The DCHECKs
        // verify the fixed-register constraints imposed by the builder.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      // Long division always goes through the runtime; inputs occupy the
      // first four argument registers and the quotient comes back in R0:R1.
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      // Single-precision hardware divide.
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Double-precision divide; the D register aliases the low S register of
      // each pair.
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2427
2428void LocationsBuilderARM::VisitRem(HRem* rem) {
2429  Primitive::Type type = rem->GetResultType();
2430
2431  // Most remainders are implemented in the runtime.
2432  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2433  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2434    // sdiv will be replaced by other instruction sequence.
2435    call_kind = LocationSummary::kNoCall;
2436  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2437             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2438    // Have hardware divide instruction for int, do it with three instructions.
2439    call_kind = LocationSummary::kNoCall;
2440  }
2441
2442  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2443
2444  switch (type) {
2445    case Primitive::kPrimInt: {
2446      if (rem->InputAt(1)->IsConstant()) {
2447        locations->SetInAt(0, Location::RequiresRegister());
2448        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2449        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2450        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2451        if (abs_imm <= 1) {
2452          // No temp register required.
2453        } else {
2454          locations->AddTemp(Location::RequiresRegister());
2455          if (!IsPowerOfTwo(abs_imm)) {
2456            locations->AddTemp(Location::RequiresRegister());
2457          }
2458        }
2459      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2460        locations->SetInAt(0, Location::RequiresRegister());
2461        locations->SetInAt(1, Location::RequiresRegister());
2462        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2463        locations->AddTemp(Location::RequiresRegister());
2464      } else {
2465        InvokeRuntimeCallingConvention calling_convention;
2466        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2467        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2468        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2469        //       we only need the latter.
2470        locations->SetOut(Location::RegisterLocation(R1));
2471      }
2472      break;
2473    }
2474    case Primitive::kPrimLong: {
2475      InvokeRuntimeCallingConvention calling_convention;
2476      locations->SetInAt(0, Location::RegisterPairLocation(
2477          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2478      locations->SetInAt(1, Location::RegisterPairLocation(
2479          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2480      // The runtime helper puts the output in R2,R3.
2481      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2482      break;
2483    }
2484    case Primitive::kPrimFloat: {
2485      InvokeRuntimeCallingConvention calling_convention;
2486      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2487      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2488      locations->SetOut(Location::FpuRegisterLocation(S0));
2489      break;
2490    }
2491
2492    case Primitive::kPrimDouble: {
2493      InvokeRuntimeCallingConvention calling_convention;
2494      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2495          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2496      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2497          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2498      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2499      break;
2500    }
2501
2502    default:
2503      LOG(FATAL) << "Unexpected rem type " << type;
2504  }
2505}
2506
2507void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2508  LocationSummary* locations = rem->GetLocations();
2509  Location out = locations->Out();
2510  Location first = locations->InAt(0);
2511  Location second = locations->InAt(1);
2512
2513  Primitive::Type type = rem->GetResultType();
2514  switch (type) {
2515    case Primitive::kPrimInt: {
2516        if (second.IsConstant()) {
2517          GenerateDivRemConstantIntegral(rem);
2518        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2519        Register reg1 = first.AsRegister<Register>();
2520        Register reg2 = second.AsRegister<Register>();
2521        Register temp = locations->GetTemp(0).AsRegister<Register>();
2522
2523        // temp = reg1 / reg2  (integer division)
2524        // temp = temp * reg2
2525        // dest = reg1 - temp
2526        __ sdiv(temp, reg1, reg2);
2527        __ mul(temp, temp, reg2);
2528        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2529      } else {
2530        InvokeRuntimeCallingConvention calling_convention;
2531        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2532        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2533        DCHECK_EQ(R1, out.AsRegister<Register>());
2534
2535        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2536      }
2537      break;
2538    }
2539
2540    case Primitive::kPrimLong: {
2541      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2542      break;
2543    }
2544
2545    case Primitive::kPrimFloat: {
2546      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2547      break;
2548    }
2549
2550    case Primitive::kPrimDouble: {
2551      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2552      break;
2553    }
2554
2555    default:
2556      LOG(FATAL) << "Unexpected rem type " << type;
2557  }
2558}
2559
2560void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2561  LocationSummary* locations =
2562      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2563  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2564  if (instruction->HasUses()) {
2565    locations->SetOut(Location::SameAsFirstInput());
2566  }
2567}
2568
2569void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2570  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2571  codegen_->AddSlowPath(slow_path);
2572
2573  LocationSummary* locations = instruction->GetLocations();
2574  Location value = locations->InAt(0);
2575
2576  switch (instruction->GetType()) {
2577    case Primitive::kPrimInt: {
2578      if (value.IsRegister()) {
2579        __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2580        __ b(slow_path->GetEntryLabel(), EQ);
2581      } else {
2582        DCHECK(value.IsConstant()) << value;
2583        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2584          __ b(slow_path->GetEntryLabel());
2585        }
2586      }
2587      break;
2588    }
2589    case Primitive::kPrimLong: {
2590      if (value.IsRegisterPair()) {
2591        __ orrs(IP,
2592                value.AsRegisterPairLow<Register>(),
2593                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2594        __ b(slow_path->GetEntryLabel(), EQ);
2595      } else {
2596        DCHECK(value.IsConstant()) << value;
2597        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2598          __ b(slow_path->GetEntryLabel());
2599        }
2600      }
2601      break;
2602    default:
2603      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2604    }
2605  }
2606}
2607
2608void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2609  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2610
2611  LocationSummary* locations =
2612      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2613
2614  switch (op->GetResultType()) {
2615    case Primitive::kPrimInt: {
2616      locations->SetInAt(0, Location::RequiresRegister());
2617      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2618      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2619      break;
2620    }
2621    case Primitive::kPrimLong: {
2622      locations->SetInAt(0, Location::RequiresRegister());
2623      locations->SetInAt(1, Location::RequiresRegister());
2624      locations->AddTemp(Location::RequiresRegister());
2625      locations->SetOut(Location::RequiresRegister());
2626      break;
2627    }
2628    default:
2629      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2630  }
2631}
2632
// Emits code for Shl/Shr/UShr.  ARM shift instructions do not mask the shift
// amount themselves, so it is masked explicitly here (kMaxIntShiftValue for
// ints, 63 for longs) to match Java semantics.  64-bit shifts are synthesized
// from 32-bit shifts plus conditional fix-ups for amounts >= 32.
// NOTE(review): when the shift amount is in a register, the masking `and_`
// rewrites that register in place.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        // Constant shift amount: mask it at compile time.
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // Shift the high part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2729
// Shift left: register constraints are shared across all shift kinds.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2733
// Shift left: code generation is shared across all shift kinds.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2737
// Arithmetic shift right: register constraints are shared across all shift kinds.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2741
// Arithmetic shift right: code generation is shared across all shift kinds.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2745
// Logical (unsigned) shift right: register constraints are shared across all shift kinds.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2749
// Logical (unsigned) shift right: code generation is shared across all shift kinds.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2753
2754void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
2755  LocationSummary* locations =
2756      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2757  InvokeRuntimeCallingConvention calling_convention;
2758  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2759  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2760  locations->SetOut(Location::RegisterLocation(R0));
2761}
2762
// Emits the runtime call that allocates a new object.  The entrypoint
// receives the type index in argument register 0 and the current method in
// argument register 1 (the temps reserved by the locations builder).
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  // The entrypoint is selected per-instruction (e.g. resolved vs. with-checks).
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2772
2773void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
2774  LocationSummary* locations =
2775      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2776  InvokeRuntimeCallingConvention calling_convention;
2777  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2778  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2779  locations->SetOut(Location::RegisterLocation(R0));
2780  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2781}
2782
// Emits the runtime call that allocates a new array.  The type index goes in
// argument register 0 and the current method in argument register 2; the
// instruction's input was placed in argument register 1 by the builder.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  // The entrypoint is selected per-instruction by the graph builder.
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2792
2793void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
2794  LocationSummary* locations =
2795      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2796  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2797  if (location.IsStackSlot()) {
2798    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2799  } else if (location.IsDoubleStackSlot()) {
2800    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2801  }
2802  locations->SetOut(location);
2803}
2804
// No code to emit: the locations builder already mapped the parameter to the
// register or stack slot where the caller placed it.
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
  // Nothing to do, the parameter is already at its location.
  UNUSED(instruction);
}
2809
2810void LocationsBuilderARM::VisitNot(HNot* not_) {
2811  LocationSummary* locations =
2812      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
2813  locations->SetInAt(0, Location::RequiresRegister());
2814  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2815}
2816
// Emits bitwise not (~) for int and long values using mvn.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      // out = ~in.
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      // Negate each half of the register pair independently.
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
2837
2838void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
2839  LocationSummary* locations =
2840      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
2841  locations->SetInAt(0, Location::RequiresRegister());
2842  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2843}
2844
// Emits boolean negation: out = in ^ 1, which equals !in for inputs that are
// 0 or 1 (the expected encoding of booleans here).
void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}
2851
2852void LocationsBuilderARM::VisitCompare(HCompare* compare) {
2853  LocationSummary* locations =
2854      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
2855  switch (compare->InputAt(0)->GetType()) {
2856    case Primitive::kPrimLong: {
2857      locations->SetInAt(0, Location::RequiresRegister());
2858      locations->SetInAt(1, Location::RequiresRegister());
2859      // Output overlaps because it is written before doing the low comparison.
2860      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2861      break;
2862    }
2863    case Primitive::kPrimFloat:
2864    case Primitive::kPrimDouble: {
2865      locations->SetInAt(0, Location::RequiresFpuRegister());
2866      locations->SetInAt(1, Location::RequiresFpuRegister());
2867      locations->SetOut(Location::RequiresRegister());
2868      break;
2869    }
2870    default:
2871      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
2872  }
2873}
2874
// Materializes the three-way comparison result (-1, 0 or 1) for long, float
// and double inputs.  The shared tail after the switch interprets the status
// flags left by the case-specific compare.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // High words decide when they differ (signed); otherwise the low words
      // are compared unsigned and the shared tail reads the flags.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // Unordered (NaN) input resolves per the instruction's gt/lt bias.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: EQ => equal (out already holds 0), CC => less, otherwise
  // falls through to greater.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2923
2924void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2925  LocationSummary* locations =
2926      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2927  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2928    locations->SetInAt(i, Location::Any());
2929  }
2930  locations->SetOut(Location::Any());
2931}
2932
// Phis emit no code of their own; this visitor must never be reached
// (presumably phis are resolved into moves before code generation — the
// LOG(FATAL) guards that invariant).
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
2937
2938void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
2939  // TODO (ported from quick): revisit Arm barrier kinds
2940  DmbOptions flavour = DmbOptions::ISH;  // quiet c++ warnings
2941  switch (kind) {
2942    case MemBarrierKind::kAnyStore:
2943    case MemBarrierKind::kLoadAny:
2944    case MemBarrierKind::kAnyAny: {
2945      flavour = DmbOptions::ISH;
2946      break;
2947    }
2948    case MemBarrierKind::kStoreStore: {
2949      flavour = DmbOptions::ISHST;
2950      break;
2951    }
2952    default:
2953      LOG(FATAL) << "Unexpected memory barrier " << kind;
2954  }
2955  __ dmb(flavour);
2956}
2957
// Emits an atomic 64-bit load (ldrexd) of [addr + offset] into out_lo:out_hi.
// out_lo is reused as a scratch register to materialize the offset — safe
// because ldrexd overwrites it immediately afterwards.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    // Form the effective address in IP.
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2969
// Emits an atomic 64-bit store of value_lo:value_hi to [addr + offset] using
// an ldrexd/strexd retry loop.  temp1/temp2 receive the (discarded) loaded
// value; temp1 is then reused for the strexd status result.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  if (offset != 0) {
    // Form the effective address in IP; temp1 is free to clobber here.
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  // strexd writes 0 to temp1 on success; retry if the exclusive store failed.
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
2992
2993void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
2994  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
2995
2996  LocationSummary* locations =
2997      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2998  locations->SetInAt(0, Location::RequiresRegister());
2999
3000  Primitive::Type field_type = field_info.GetFieldType();
3001  if (Primitive::IsFloatingPointType(field_type)) {
3002    locations->SetInAt(1, Location::RequiresFpuRegister());
3003  } else {
3004    locations->SetInAt(1, Location::RequiresRegister());
3005  }
3006
3007  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
3008  bool generate_volatile = field_info.IsVolatile()
3009      && is_wide
3010      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3011  // Temporary registers for the write barrier.
3012  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
3013  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
3014    locations->AddTemp(Location::RequiresRegister());
3015    locations->AddTemp(Location::RequiresRegister());
3016  } else if (generate_volatile) {
3017    // Arm encoding have some additional constraints for ldrexd/strexd:
3018    // - registers need to be consecutive
3019    // - the first register should be even but not R14.
3020    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3021    // enable Arm encoding.
3022    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3023
3024    locations->AddTemp(Location::RequiresRegister());
3025    locations->AddTemp(Location::RequiresRegister());
3026    if (field_type == Primitive::kPrimDouble) {
3027      // For doubles we need two more registers to copy the value.
3028      locations->AddTemp(Location::RegisterLocation(R2));
3029      locations->AddTemp(Location::RegisterLocation(R3));
3030    }
3031  }
3032}
3033
// Emits the store for an instance or static field set: a plain store for
// narrow types, an ldrexd/strexd sequence for volatile 64-bit stores when
// atomic strd is unavailable, plus the barriers required for volatile fields
// and the GC card marking for reference stores.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  // Volatile stores are bracketed by barriers: any/store before, any/any after.
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single-copy-atomic strd: fall back to the ldrexd/strexd loop.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Copy the double to two core registers so the exclusive-store
        // sequence can be used.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  // Reference stores mark the GC card of the holding object.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3133
3134void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
3135  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3136  LocationSummary* locations =
3137      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3138  locations->SetInAt(0, Location::RequiresRegister());
3139
3140  bool volatile_for_double = field_info.IsVolatile()
3141      && (field_info.GetFieldType() == Primitive::kPrimDouble)
3142      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3143  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);
3144
3145  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3146    locations->SetOut(Location::RequiresFpuRegister());
3147  } else {
3148    locations->SetOut(Location::RequiresRegister(),
3149                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
3150  }
3151  if (volatile_for_double) {
3152    // Arm encoding have some additional constraints for ldrexd/strexd:
3153    // - registers need to be consecutive
3154    // - the first register should be even but not R14.
3155    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3156    // enable Arm encoding.
3157    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3158    locations->AddTemp(Location::RequiresRegister());
3159    locations->AddTemp(Location::RequiresRegister());
3160  }
3161}
3162
// Emits the load for an instance or static field get. InAt(0) holds the base
// (the object, or the declaring class for statics); the value is loaded from
// base + field offset into the output location.
// Volatile specifics:
//  - 64-bit fields on cores without atomic ldrd/strd are loaded with
//    GenerateWideAtomicLoad (ldrexd) to keep the 64-bit read single-copy atomic;
//  - a LoadAny barrier is emitted after the load (see end of function).
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // ldrexd into the output pair for a single-copy-atomic 64-bit read.
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into two core temps, then move to the FP pair.
        // The null check is recorded here, right after the load, rather than
        // after the switch like the other cases.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    // Order the volatile load before any subsequent memory access.
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3247
3248void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3249  HandleFieldSet(instruction, instruction->GetFieldInfo());
3250}
3251
3252void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3253  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3254}
3255
3256void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3257  HandleFieldGet(instruction, instruction->GetFieldInfo());
3258}
3259
3260void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3261  HandleFieldGet(instruction, instruction->GetFieldInfo());
3262}
3263
3264void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3265  HandleFieldGet(instruction, instruction->GetFieldInfo());
3266}
3267
3268void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3269  HandleFieldGet(instruction, instruction->GetFieldInfo());
3270}
3271
3272void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3273  HandleFieldSet(instruction, instruction->GetFieldInfo());
3274}
3275
3276void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3277  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3278}
3279
3280void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
3281  LocationSummary* locations =
3282      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3283  locations->SetInAt(0, Location::RequiresRegister());
3284  if (instruction->HasUses()) {
3285    locations->SetOut(Location::SameAsFirstInput());
3286  }
3287}
3288
3289void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
3290  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3291    return;
3292  }
3293  Location obj = instruction->GetLocations()->InAt(0);
3294
3295  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
3296  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3297}
3298
3299void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3300  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3301  codegen_->AddSlowPath(slow_path);
3302
3303  LocationSummary* locations = instruction->GetLocations();
3304  Location obj = locations->InAt(0);
3305
3306  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
3307  __ b(slow_path->GetEntryLabel(), EQ);
3308}
3309
3310void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3311  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3312    GenerateImplicitNullCheck(instruction);
3313  } else {
3314    GenerateExplicitNullCheck(instruction);
3315  }
3316}
3317
3318void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3319  LocationSummary* locations =
3320      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3321  locations->SetInAt(0, Location::RequiresRegister());
3322  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3323  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3324    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3325  } else {
3326    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3327  }
3328}
3329
// Emits the element load for an array get. The per-type data offset accounts
// for the array header; a constant index is folded into the addressing
// offset, otherwise IP temporarily holds obj + (index << scale) and the load
// uses just the data offset.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        // Byte-sized elements: no shift needed on the index.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References and ints share the word-sized path (compressed-free heap
      // references are 32-bit here, per the DCHECK).
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // Record a possible implicit null check folded into the load above.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3456
3457void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3458  Primitive::Type value_type = instruction->GetComponentType();
3459
3460  bool needs_write_barrier =
3461      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3462  bool needs_runtime_call = instruction->NeedsTypeCheck();
3463
3464  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3465      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3466  if (needs_runtime_call) {
3467    InvokeRuntimeCallingConvention calling_convention;
3468    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3469    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3470    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3471  } else {
3472    locations->SetInAt(0, Location::RequiresRegister());
3473    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3474    if (Primitive::IsFloatingPointType(value_type)) {
3475      locations->SetInAt(2, Location::RequiresFpuRegister());
3476    } else {
3477      locations->SetInAt(2, Location::RequiresRegister());
3478    }
3479
3480    if (needs_write_barrier) {
3481      // Temporary registers for the write barrier.
3482      locations->AddTemp(Location::RequiresRegister());
3483      locations->AddTemp(Location::RequiresRegister());
3484    }
3485  }
3486}
3487
// Emits the element store for an array set. Addressing mirrors VisitArrayGet:
// a constant index is folded into the offset, otherwise IP holds
// obj + (index << scale). Reference stores either mark the GC card inline or,
// when a type check is needed, call the pAputObject runtime entrypoint.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        // Byte-sized elements: no shift needed on the index.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // This case records the null check itself, right after the store.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // Type-checked reference store: delegate to the runtime.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3612
3613void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
3614  LocationSummary* locations =
3615      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3616  locations->SetInAt(0, Location::RequiresRegister());
3617  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3618}
3619
3620void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3621  LocationSummary* locations = instruction->GetLocations();
3622  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3623  Register obj = locations->InAt(0).AsRegister<Register>();
3624  Register out = locations->Out().AsRegister<Register>();
3625  __ LoadFromOffset(kLoadWord, out, obj, offset);
3626  codegen_->MaybeRecordImplicitNullCheck(instruction);
3627}
3628
3629void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3630  LocationSummary* locations =
3631      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3632  locations->SetInAt(0, Location::RequiresRegister());
3633  locations->SetInAt(1, Location::RequiresRegister());
3634  if (instruction->HasUses()) {
3635    locations->SetOut(Location::SameAsFirstInput());
3636  }
3637}
3638
3639void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3640  LocationSummary* locations = instruction->GetLocations();
3641  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3642      instruction, locations->InAt(0), locations->InAt(1));
3643  codegen_->AddSlowPath(slow_path);
3644
3645  Register index = locations->InAt(0).AsRegister<Register>();
3646  Register length = locations->InAt(1).AsRegister<Register>();
3647
3648  __ cmp(index, ShifterOperand(length));
3649  __ b(slow_path->GetEntryLabel(), CS);
3650}
3651
// Marks the card covering `object` as dirty after a reference store, so the
// GC knows to rescan that region. `temp` and `card` are scratch registers;
// `value` is the reference that was stored into `object`.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  Label is_null;
  if (can_be_null) {
    // A null store creates no cross-region reference; skip the marking.
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // card = thread-local card table base; temp = object >> kCardShift.
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store card's low byte at card[temp] — presumably the table base is chosen
  // so its low byte equals the dirty-card value, avoiding an immediate load.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3668
3669void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
3670  temp->SetLocations(nullptr);
3671}
3672
3673void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
3674  // Nothing to do, this is driven by the code generator.
3675  UNUSED(temp);
3676}
3677
3678void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
3679  UNUSED(instruction);
3680  LOG(FATAL) << "Unreachable";
3681}
3682
3683void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
3684  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
3685}
3686
3687void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3688  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
3689}
3690
3691void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3692  HBasicBlock* block = instruction->GetBlock();
3693  if (block->GetLoopInformation() != nullptr) {
3694    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3695    // The back edge will generate the suspend check.
3696    return;
3697  }
3698  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3699    // The goto will generate the suspend check.
3700    return;
3701  }
3702  GenerateSuspendCheck(instruction, nullptr);
3703}
3704
// Emits the suspend-check test: loads the thread's flags halfword and, when
// any flag is set, branches to a slow path that is created lazily and cached
// on the instruction (so repeated emissions share one slow path). With a
// `successor`, the fast path branches there; otherwise it falls through.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    // First emission for this check: create and register the slow path.
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    // Reusing a cached slow path; it must have been built for this successor.
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    // Fast path falls through; the slow path returns just after the branch.
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3733
3734ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
3735  return codegen_->GetAssembler();
3736}
3737
// Materializes a single move of the parallel-move graph. Covers every
// source/destination combination the register allocator produces here:
// core/FP registers, core/FP register pairs, (double) stack slots, and
// constants. IP and DTMP serve as scratch registers for memory-to-memory
// moves; constants are synthesized with immediate loads.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      // Slot-to-slot move goes through IP.
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsDoubleStackSlot()) {
      // 64-bit slot-to-slot move goes through the FP scratch register DTMP.
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      // ldrd needs an even-aligned consecutive pair (see ExpectedPairLayout).
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant sources: synthesize the value with immediate loads.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        // To a 64-bit slot: two word stores through IP.
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Store the raw bit pattern as two words through IP.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
3852
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  // Swap the contents of core register `reg` with the stack slot at SP + `mem`,
  // using IP as the temporary. The order matters: stash the register in IP
  // first, then load the slot into the register, then spill IP into the slot.
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
3858
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  // Swap two stack slots using IP plus one additional core scratch register.
  // If the scratch register had to be spilled (pushed on the stack), every
  // SP-relative slot offset must be biased by one word to compensate.
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
3869
// Emits code swapping the contents of `source` and `destination` for the
// parallel move at `index`, dispatching on every supported location pairing.
// IP is the core scratch register and DTMP the FP scratch register.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core <-> core: rotate through IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // S-register <-> S-register: rotate through the core scratch IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core pair <-> core pair: park one pair in the FP scratch DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    // The pair must be (even, even + 1) so `low_reg + 1` is its high half.
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    // Save the pair in DTMP, load the slot into the pair, then spill DTMP.
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // D-register <-> D-register: rotate through DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // D-register <-> double stack slot, via DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // S-register <-> stack slot, via IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot: swap word by word.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3942
void ParallelMoveResolverARM::SpillScratch(int reg) {
  // Preserve a core register on the stack while it is borrowed as scratch.
  __ Push(static_cast<Register>(reg));
}
3946
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  // Restore a core register previously saved by SpillScratch.
  __ Pop(static_cast<Register>(reg));
}
3950
3951void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3952  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3953      ? LocationSummary::kCallOnSlowPath
3954      : LocationSummary::kNoCall;
3955  LocationSummary* locations =
3956      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3957  locations->SetOut(Location::RequiresRegister());
3958}
3959
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  Register out = cls->GetLocations()->Out().AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // Fast path: the class is the declaring class of the current method.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    codegen_->LoadCurrentMethod(out);
    __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index].
    codegen_->LoadCurrentMethod(out);
    __ LoadFromOffset(
        kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    // A null cache entry means the type is unresolved: resolve (and possibly
    // initialize) it on the slow path.
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      // The initialization check binds the slow path's exit label itself.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
3986
3987void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
3988  LocationSummary* locations =
3989      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
3990  locations->SetInAt(0, Location::RequiresRegister());
3991  if (check->HasUses()) {
3992    locations->SetOut(Location::SameAsFirstInput());
3993  }
3994}
3995
3996void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
3997  // We assume the class is not null.
3998  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
3999      check->GetLoadClass(), check, check->GetDexPc(), true);
4000  codegen_->AddSlowPath(slow_path);
4001  GenerateClassInitializationCheck(slow_path,
4002                                   check->GetLocations()->InAt(0).AsRegister<Register>());
4003}
4004
// Branches to `slow_path` unless the class in `class_reg` is fully
// initialized; binds the slow path's exit label on the fast path.
// Clobbers IP.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4015
4016void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4017  LocationSummary* locations =
4018      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4019  locations->SetOut(Location::RequiresRegister());
4020}
4021
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  // out = current_method->declaring_class_->dex_cache_strings_[string_index].
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  codegen_->LoadCurrentMethod(out);
  __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // A null cache entry means the string is unresolved: go to the slow path.
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
4035
4036void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4037  LocationSummary* locations =
4038      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4039  locations->SetOut(Location::RequiresRegister());
4040}
4041
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  // Load the pending exception from the current thread (TR)...
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  // ...then clear the thread's exception slot.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
4049
4050void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4051  LocationSummary* locations =
4052      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4053  InvokeRuntimeCallingConvention calling_convention;
4054  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4055}
4056
4057void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
4058  codegen_->InvokeRuntime(
4059      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
4060}
4061
4062void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4063  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4064      ? LocationSummary::kNoCall
4065      : LocationSummary::kCallOnSlowPath;
4066  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4067  locations->SetInAt(0, Location::RequiresRegister());
4068  locations->SetInAt(1, Location::RequiresRegister());
4069  // The out register is used as a temporary, so it overlaps with the inputs.
4070  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4071}
4072
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Label done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ cmp(obj, ShifterOperand(0));
    __ b(&zero, EQ);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // `zero` is only branched to from the null check and/or the failed final
  // comparison above; skip materializing the 0 when it is unreachable.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4117
4118void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4119  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4120      instruction, LocationSummary::kCallOnSlowPath);
4121  locations->SetInAt(0, Location::RequiresRegister());
4122  locations->SetInAt(1, Location::RequiresRegister());
4123  locations->AddTemp(Location::RequiresRegister());
4124}
4125
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    // A null reference passes any checkcast: jump straight to the exit label.
    __ cmp(obj, ShifterOperand(0));
    __ b(slow_path->GetExitLabel(), EQ);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  // On mismatch the slow path performs the full type check and may throw.
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
}
4148
4149void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4150  LocationSummary* locations =
4151      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4152  InvokeRuntimeCallingConvention calling_convention;
4153  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4154}
4155
4156void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4157  codegen_->InvokeRuntime(instruction->IsEnter()
4158        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4159      instruction,
4160      instruction->GetDexPc(),
4161      nullptr);
4162}
4163
4164void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
4165void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
4166void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4167
4168void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4169  LocationSummary* locations =
4170      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4171  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4172         || instruction->GetResultType() == Primitive::kPrimLong);
4173  locations->SetInAt(0, Location::RequiresRegister());
4174  locations->SetInAt(1, Location::RequiresRegister());
4175  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4176}
4177
4178void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
4179  HandleBitwiseOperation(instruction);
4180}
4181
4182void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
4183  HandleBitwiseOperation(instruction);
4184}
4185
4186void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
4187  HandleBitwiseOperation(instruction);
4188}
4189
4190void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4191  LocationSummary* locations = instruction->GetLocations();
4192
4193  if (instruction->GetResultType() == Primitive::kPrimInt) {
4194    Register first = locations->InAt(0).AsRegister<Register>();
4195    Register second = locations->InAt(1).AsRegister<Register>();
4196    Register out = locations->Out().AsRegister<Register>();
4197    if (instruction->IsAnd()) {
4198      __ and_(out, first, ShifterOperand(second));
4199    } else if (instruction->IsOr()) {
4200      __ orr(out, first, ShifterOperand(second));
4201    } else {
4202      DCHECK(instruction->IsXor());
4203      __ eor(out, first, ShifterOperand(second));
4204    }
4205  } else {
4206    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
4207    Location first = locations->InAt(0);
4208    Location second = locations->InAt(1);
4209    Location out = locations->Out();
4210    if (instruction->IsAnd()) {
4211      __ and_(out.AsRegisterPairLow<Register>(),
4212              first.AsRegisterPairLow<Register>(),
4213              ShifterOperand(second.AsRegisterPairLow<Register>()));
4214      __ and_(out.AsRegisterPairHigh<Register>(),
4215              first.AsRegisterPairHigh<Register>(),
4216              ShifterOperand(second.AsRegisterPairHigh<Register>()));
4217    } else if (instruction->IsOr()) {
4218      __ orr(out.AsRegisterPairLow<Register>(),
4219             first.AsRegisterPairLow<Register>(),
4220             ShifterOperand(second.AsRegisterPairLow<Register>()));
4221      __ orr(out.AsRegisterPairHigh<Register>(),
4222             first.AsRegisterPairHigh<Register>(),
4223             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4224    } else {
4225      DCHECK(instruction->IsXor());
4226      __ eor(out.AsRegisterPairLow<Register>(),
4227             first.AsRegisterPairLow<Register>(),
4228             ShifterOperand(second.AsRegisterPairLow<Register>()));
4229      __ eor(out.AsRegisterPairHigh<Register>(),
4230             first.AsRegisterPairHigh<Register>(),
4231             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4232    }
4233  }
4234}
4235
// Emits the call sequence for a static or direct invoke. `temp` must be the
// method register; it is clobbered, and LR holds the code entry point.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
  DCHECK_EQ(temp, kArtMethodRegister);

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, temp,
                      mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else {
    // temp = method;
    LoadCurrentMethod(temp);
    if (!invoke->IsRecursive()) {
      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
      // temp = temp[index_in_cache]
      __ LoadFromOffset(
          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
      // LR = temp[offset_of_quick_compiled_code]
      __ LoadFromOffset(kLoadWord, LR, temp,
                        mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
    } else {
      // Recursive call: branch directly to this method's own frame entry.
      __ bl(GetFrameEntryLabel());
    }
  }

  DCHECK(!IsLeafMethod());
}
4278
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  // Reaching here means an HBoundType survived to code generation: a bug.
  LOG(FATAL) << "Unreachable";
}
4284
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  // Reaching here means an HBoundType survived to code generation: a bug.
  LOG(FATAL) << "Unreachable";
}
4290
4291}  // namespace arm
4292}  // namespace art
4293