code_generator_arm.cc revision fd88f16100cceafbfde1b4f095f17e89444d6fa8
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "gc/accounting/card_table.h"
24#include "intrinsics.h"
25#include "intrinsics_arm.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// Stack offset at which the current ArtMethod* is spilled; it always sits at
// the base of the frame (see GenerateFrameEntry).
static constexpr int kCurrentMethodStackOffset = 0;
// Register holding the method pointer on entry (see GetMethodLocation).
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Core callee-save set. PC is listed to mimic Quick's frame layout; on entry
// LR is pushed in its slot instead (see GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
// FPU callee-save registers: S16-S31.
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Inside the slow paths below, only a CodeGenerator* is in scope, so `__`
// emits through the down-casted assembler obtained from it.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
// Thread-relative byte offset of a quick runtime entrypoint.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
60
61class NullCheckSlowPathARM : public SlowPathCodeARM {
62 public:
63  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
64
65  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
66    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
67    __ Bind(GetEntryLabel());
68    arm_codegen->InvokeRuntime(
69        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
70  }
71
72 private:
73  HNullCheck* const instruction_;
74  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
75};
76
77class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
78 public:
79  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
80
81  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
82    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
83    __ Bind(GetEntryLabel());
84    arm_codegen->InvokeRuntime(
85        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
86  }
87
88 private:
89  HDivZeroCheck* const instruction_;
90  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
91};
92
93class SuspendCheckSlowPathARM : public SlowPathCodeARM {
94 public:
95  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
96      : instruction_(instruction), successor_(successor) {}
97
98  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
99    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
100    __ Bind(GetEntryLabel());
101    SaveLiveRegisters(codegen, instruction_->GetLocations());
102    arm_codegen->InvokeRuntime(
103        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
104    RestoreLiveRegisters(codegen, instruction_->GetLocations());
105    if (successor_ == nullptr) {
106      __ b(GetReturnLabel());
107    } else {
108      __ b(arm_codegen->GetLabelOf(successor_));
109    }
110  }
111
112  Label* GetReturnLabel() {
113    DCHECK(successor_ == nullptr);
114    return &return_label_;
115  }
116
117  HBasicBlock* GetSuccessor() const {
118    return successor_;
119  }
120
121 private:
122  HSuspendCheck* const instruction_;
123  // If not null, the block to branch to after the suspend check.
124  HBasicBlock* const successor_;
125
126  // If `successor_` is null, the label to branch to after the suspend check.
127  Label return_label_;
128
129  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
130};
131
132class BoundsCheckSlowPathARM : public SlowPathCodeARM {
133 public:
134  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
135                         Location index_location,
136                         Location length_location)
137      : instruction_(instruction),
138        index_location_(index_location),
139        length_location_(length_location) {}
140
141  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
142    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
143    __ Bind(GetEntryLabel());
144    // We're moving two locations to locations that could overlap, so we need a parallel
145    // move resolver.
146    InvokeRuntimeCallingConvention calling_convention;
147    codegen->EmitParallelMoves(
148        index_location_,
149        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
150        Primitive::kPrimInt,
151        length_location_,
152        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
153        Primitive::kPrimInt);
154    arm_codegen->InvokeRuntime(
155        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
156  }
157
158 private:
159  HBoundsCheck* const instruction_;
160  const Location index_location_;
161  const Location length_location_;
162
163  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
164};
165
166class LoadClassSlowPathARM : public SlowPathCodeARM {
167 public:
168  LoadClassSlowPathARM(HLoadClass* cls,
169                       HInstruction* at,
170                       uint32_t dex_pc,
171                       bool do_clinit)
172      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
173    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
174  }
175
176  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
177    LocationSummary* locations = at_->GetLocations();
178
179    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
180    __ Bind(GetEntryLabel());
181    SaveLiveRegisters(codegen, locations);
182
183    InvokeRuntimeCallingConvention calling_convention;
184    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
185    int32_t entry_point_offset = do_clinit_
186        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
187        : QUICK_ENTRY_POINT(pInitializeType);
188    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
189
190    // Move the class to the desired location.
191    Location out = locations->Out();
192    if (out.IsValid()) {
193      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
194      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
195    }
196    RestoreLiveRegisters(codegen, locations);
197    __ b(GetExitLabel());
198  }
199
200 private:
201  // The class this slow path will load.
202  HLoadClass* const cls_;
203
204  // The instruction where this slow path is happening.
205  // (Might be the load class or an initialization check).
206  HInstruction* const at_;
207
208  // The dex PC of `at_`.
209  const uint32_t dex_pc_;
210
211  // Whether to initialize the class.
212  const bool do_clinit_;
213
214  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
215};
216
217class LoadStringSlowPathARM : public SlowPathCodeARM {
218 public:
219  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
220
221  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
222    LocationSummary* locations = instruction_->GetLocations();
223    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
224
225    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
226    __ Bind(GetEntryLabel());
227    SaveLiveRegisters(codegen, locations);
228
229    InvokeRuntimeCallingConvention calling_convention;
230    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
231    arm_codegen->InvokeRuntime(
232        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
233    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
234
235    RestoreLiveRegisters(codegen, locations);
236    __ b(GetExitLabel());
237  }
238
239 private:
240  HLoadString* const instruction_;
241
242  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
243};
244
245class TypeCheckSlowPathARM : public SlowPathCodeARM {
246 public:
247  TypeCheckSlowPathARM(HInstruction* instruction,
248                       Location class_to_check,
249                       Location object_class,
250                       uint32_t dex_pc)
251      : instruction_(instruction),
252        class_to_check_(class_to_check),
253        object_class_(object_class),
254        dex_pc_(dex_pc) {}
255
256  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
257    LocationSummary* locations = instruction_->GetLocations();
258    DCHECK(instruction_->IsCheckCast()
259           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
260
261    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
262    __ Bind(GetEntryLabel());
263    SaveLiveRegisters(codegen, locations);
264
265    // We're moving two locations to locations that could overlap, so we need a parallel
266    // move resolver.
267    InvokeRuntimeCallingConvention calling_convention;
268    codegen->EmitParallelMoves(
269        class_to_check_,
270        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
271        Primitive::kPrimNot,
272        object_class_,
273        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
274        Primitive::kPrimNot);
275
276    if (instruction_->IsInstanceOf()) {
277      arm_codegen->InvokeRuntime(
278          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
279      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
280    } else {
281      DCHECK(instruction_->IsCheckCast());
282      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
283    }
284
285    RestoreLiveRegisters(codegen, locations);
286    __ b(GetExitLabel());
287  }
288
289 private:
290  HInstruction* const instruction_;
291  const Location class_to_check_;
292  const Location object_class_;
293  uint32_t dex_pc_;
294
295  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
296};
297
298class DeoptimizationSlowPathARM : public SlowPathCodeARM {
299 public:
300  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
301    : instruction_(instruction) {}
302
303  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
304    __ Bind(GetEntryLabel());
305    SaveLiveRegisters(codegen, instruction_->GetLocations());
306    DCHECK(instruction_->IsDeoptimize());
307    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
308    uint32_t dex_pc = deoptimize->GetDexPc();
309    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
310    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
311  }
312
313 private:
314  HInstruction* const instruction_;
315  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
316};
317
#undef __
// From here on, `__` emits through the code generator's own assembler.
// (The duplicated `#undef __` that used to be here was redundant.)
#define __ down_cast<ArmAssembler*>(GetAssembler())->
322
323inline Condition ARMCondition(IfCondition cond) {
324  switch (cond) {
325    case kCondEQ: return EQ;
326    case kCondNE: return NE;
327    case kCondLT: return LT;
328    case kCondLE: return LE;
329    case kCondGT: return GT;
330    case kCondGE: return GE;
331    default:
332      LOG(FATAL) << "Unknown if condition";
333  }
334  return EQ;        // Unreachable.
335}
336
337inline Condition ARMOppositeCondition(IfCondition cond) {
338  switch (cond) {
339    case kCondEQ: return NE;
340    case kCondNE: return EQ;
341    case kCondLT: return GE;
342    case kCondLE: return GT;
343    case kCondGT: return LE;
344    case kCondGE: return LT;
345    default:
346      LOG(FATAL) << "Unknown if condition";
347  }
348  return EQ;        // Unreachable.
349}
350
351void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
352  stream << Register(reg);
353}
354
355void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
356  stream << SRegister(reg);
357}
358
359size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
360  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
361  return kArmWordSize;
362}
363
364size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
365  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
366  return kArmWordSize;
367}
368
369size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
370  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
371  return kArmWordSize;
372}
373
374size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
375  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
376  return kArmWordSize;
377}
378
// Constructs the ARM code generator, registering the core and FPU
// callee-save masks with the base CodeGenerator.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(false /* can_relocate_branches */),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
400
401Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
402  switch (type) {
403    case Primitive::kPrimLong: {
404      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
405      ArmManagedRegister pair =
406          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
407      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
408      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
409
410      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
411      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
412      UpdateBlockedPairRegisters();
413      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
414    }
415
416    case Primitive::kPrimByte:
417    case Primitive::kPrimBoolean:
418    case Primitive::kPrimChar:
419    case Primitive::kPrimShort:
420    case Primitive::kPrimInt:
421    case Primitive::kPrimNot: {
422      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
423      // Block all register pairs that contain `reg`.
424      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
425        ArmManagedRegister current =
426            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
427        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
428          blocked_register_pairs_[i] = true;
429        }
430      }
431      return Location::RegisterLocation(reg);
432    }
433
434    case Primitive::kPrimFloat: {
435      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
436      return Location::FpuRegisterLocation(reg);
437    }
438
439    case Primitive::kPrimDouble: {
440      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
441      DCHECK_EQ(reg % 2, 0);
442      return Location::FpuRegisterPairLocation(reg, reg + 1);
443    }
444
445    case Primitive::kPrimVoid:
446      LOG(FATAL) << "Unreachable type " << type;
447  }
448
449  return Location();
450}
451
452void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
453  // Don't allocate the dalvik style register pair passing.
454  blocked_register_pairs_[R1_R2] = true;
455
456  // Stack register, LR and PC are always reserved.
457  blocked_core_registers_[SP] = true;
458  blocked_core_registers_[LR] = true;
459  blocked_core_registers_[PC] = true;
460
461  // Reserve thread register.
462  blocked_core_registers_[TR] = true;
463
464  // Reserve temp register.
465  blocked_core_registers_[IP] = true;
466
467  if (is_baseline) {
468    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
469      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
470    }
471
472    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
473
474    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
475      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
476    }
477  }
478
479  UpdateBlockedPairRegisters();
480}
481
482void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
483  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
484    ArmManagedRegister current =
485        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
486    if (blocked_core_registers_[current.AsRegisterPairLow()]
487        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
488      blocked_register_pairs_[i] = true;
489    }
490  }
491}
492
// Builds the instruction visitor that emits code through the code
// generator's assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
497
498void CodeGeneratorARM::ComputeSpillMask() {
499  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
500  // Save one extra register for baseline. Note that on thumb2, there is no easy
501  // instruction to restore just the PC, so this actually helps both baseline
502  // and non-baseline to save and restore at least two registers at entry and exit.
503  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
504  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
505  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
506  // We use vpush and vpop for saving and restoring floating point registers, which take
507  // a SRegister and the number of registers to save/restore after that SRegister. We
508  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
509  // but in the range.
510  if (fpu_spill_mask_ != 0) {
511    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
512    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
513    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
514      fpu_spill_mask_ |= (1 << i);
515    }
516  }
517}
518
// Maps an ARM core register to its DWARF register number for CFI.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
522
// Maps an ARM S register to its DWARF register number for CFI.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
526
// Emits the method prologue: optional stack overflow probe, callee-save
// pushes (with matching CFI), frame allocation, and the current-method spill.
void CodeGeneratorARM::GenerateFrameEntry() {
  // Leaf methods with small frames cannot overflow the reserved stack
  // region, so they may skip the explicit probe below.
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    // Nothing to push or allocate.
    return;
  }

  if (!skip_overflow_check) {
    // Probe the far end of the reserved stack region: the load faults on
    // overflow (implicit check, per the DCHECK above), and the recorded PC
    // lets the runtime attribute the fault to this method.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is a contiguous run (see ComputeSpillMask), so one
    // vpush starting at its lowest register covers it.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame and spill the current method at SP + 0
  // (kCurrentMethodStackOffset).
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
560
// Emits the method epilogue: frame deallocation and callee-save pops, with
// CFI state saved/restored so later code (e.g. slow paths) keeps valid CFI.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    // Nothing was pushed on entry; just return.
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  // Deallocate the part of the frame beyond the register spill area.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // core_spill_mask_ includes PC (see kCoreCalleeSaves), so this pop also
  // returns from the method.
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
580
// Binds the label of `block` to the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
584
585Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
586  switch (load->GetType()) {
587    case Primitive::kPrimLong:
588    case Primitive::kPrimDouble:
589      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
590
591    case Primitive::kPrimInt:
592    case Primitive::kPrimNot:
593    case Primitive::kPrimFloat:
594      return Location::StackSlot(GetStackSlot(load->GetLocal()));
595
596    case Primitive::kPrimBoolean:
597    case Primitive::kPrimByte:
598    case Primitive::kPrimChar:
599    case Primitive::kPrimShort:
600    case Primitive::kPrimVoid:
601      LOG(FATAL) << "Unexpected type " << load->GetType();
602      UNREACHABLE();
603  }
604
605  LOG(FATAL) << "Unreachable";
606  UNREACHABLE();
607}
608
// Computes the location (register or stack slot) of the next argument of
// `type` per the managed calling convention. A stack slot is reserved for
// every argument, including register ones, so `stack_index_` always
// advances.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // 32-bit core value: next free core register, else its stack slot.
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // 64-bit core value: consumes two register indices and two slots.
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The pair must be made of consecutive registers (see
        // ExpectedPairLayout).
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // An odd float_index_ points into a single-register hole left behind a
      // previously aligned double; at an even index there is no such hole, so
      // the float cursor must catch up with the double cursor first.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles take an even-aligned pair of S registers; round the double
      // cursor up past any float already allocated at an odd index.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
683
684Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
685  switch (type) {
686    case Primitive::kPrimBoolean:
687    case Primitive::kPrimByte:
688    case Primitive::kPrimChar:
689    case Primitive::kPrimShort:
690    case Primitive::kPrimInt:
691    case Primitive::kPrimNot: {
692      return Location::RegisterLocation(R0);
693    }
694
695    case Primitive::kPrimFloat: {
696      return Location::FpuRegisterLocation(S0);
697    }
698
699    case Primitive::kPrimLong: {
700      return Location::RegisterPairLocation(R0, R1);
701    }
702
703    case Primitive::kPrimDouble: {
704      return Location::FpuRegisterPairLocation(S0, S1);
705    }
706
707    case Primitive::kPrimVoid:
708      return Location();
709  }
710  UNREACHABLE();
711}
712
// The method pointer is always passed in R0 (kMethodRegisterArgument).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
716
717void CodeGeneratorARM::Move32(Location destination, Location source) {
718  if (source.Equals(destination)) {
719    return;
720  }
721  if (destination.IsRegister()) {
722    if (source.IsRegister()) {
723      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
724    } else if (source.IsFpuRegister()) {
725      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
726    } else {
727      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
728    }
729  } else if (destination.IsFpuRegister()) {
730    if (source.IsRegister()) {
731      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
732    } else if (source.IsFpuRegister()) {
733      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
734    } else {
735      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
736    }
737  } else {
738    DCHECK(destination.IsStackSlot()) << destination;
739    if (source.IsRegister()) {
740      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
741    } else if (source.IsFpuRegister()) {
742      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
743    } else {
744      DCHECK(source.IsStackSlot()) << source;
745      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
746      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
747    }
748  }
749}
750
// Emits a 64-bit move between core register pairs, FPU register pairs and
// double stack slots. Combinations not needed by the generator are
// UNIMPLEMENTED.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // The two halves may overlap, so let the parallel move resolver order
      // the word-sized moves safely.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // A word-pair load needs an even-aligned, consecutive register pair.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // A pair starting at R1 cannot use a single word-pair store (STRD
        // requires an even-numbered first register), so store each half.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack: move the two words through the parallel move
      // resolver.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
808
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  // Baseline move: materialize the value defined by `instruction` into
  // `location`, on behalf of its user `move_for`. Handles the current method,
  // constants, Dex local loads, temporaries, and plain instruction outputs.
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current ArtMethod* lives in a fixed stack slot.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // The value is already in the requested location; nothing to do.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Constants are materialized at the use site.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Materialize in scratch register IP, then spill.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Spill each 32-bit half through IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Load of a Dex local: copy from the local's stack slot, with the slot
    // width determined by the value type.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    // Temporaries live in stack slots sized by their type.
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: copy the output of the defining instruction, which must
    // immediately precede `move_for` (possibly via a temporary).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
892
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  // Load the entrypoint address from the current Thread (TR) and call it.
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  // Record a stack map at this call site for `dex_pc`.
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Runtime calls are only expected from these check instructions, from
  // instructions whose locations declare they can call, or from non-leaf
  // methods.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
907
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  // An unconditional branch needs no operand or output locations.
  got->SetLocations(nullptr);
}
911
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Loop back edge: emit the loop's suspend check here; it also emits the
    // branch to the successor, so we are done.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // A suspend check preceding this goto in the entry block is emitted here,
  // right before the branch.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // Fall through when the successor is the next block in emission order.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
933
void LocationsBuilderARM::VisitExit(HExit* exit) {
  // The exit instruction needs no operand or output locations.
  exit->SetLocations(nullptr);
}
937
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  // No code is generated for the exit block.
  UNUSED(exit);
}
941
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  // Emits the compare-and-branch for `instruction`'s condition (input 0).
  // Either target may be null when the corresponding successor is the next
  // block in emission order (fall-through); `always_true_target` is used for
  // the statically-true case.
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        // Use an immediate operand when the constant is encodable for CMP;
        // otherwise materialize it in scratch register IP.
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  // Branch to the false target unless it falls through.
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
993
994void LocationsBuilderARM::VisitIf(HIf* if_instr) {
995  LocationSummary* locations =
996      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
997  HInstruction* cond = if_instr->InputAt(0);
998  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
999    locations->SetInAt(0, Location::RequiresRegister());
1000  }
1001}
1002
1003void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1004  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1005  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1006  Label* always_true_target = true_target;
1007  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1008                                if_instr->IfTrueSuccessor())) {
1009    always_true_target = nullptr;
1010  }
1011  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1012                                if_instr->IfFalseSuccessor())) {
1013    false_target = nullptr;
1014  }
1015  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1016}
1017
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  // Deoptimization branches to a slow path, so this is a call-on-slow-path
  // summary. A register input is only needed when the condition has been
  // materialized.
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}
1027
1028void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1029  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1030      DeoptimizationSlowPathARM(deoptimize);
1031  codegen_->AddSlowPath(slow_path);
1032  Label* slow_path_entry = slow_path->GetEntryLabel();
1033  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1034}
1035
void LocationsBuilderARM::VisitCondition(HCondition* cond) {
  // Comparisons take a register on the left and a register or constant on
  // the right. An output register is only needed when the boolean result
  // must be materialized.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
  if (cond->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}
1045
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  // When the condition is not materialized, the user (e.g. HIf) folds the
  // comparison into its own code; emit nothing here.
  if (!cond->NeedsMaterialization()) return;
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    // Use an immediate operand when encodable for CMP; otherwise load the
    // constant into scratch register IP first.
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // Materialize the boolean with an IT block: 1 when the condition holds,
  // 0 otherwise.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}
1071
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  // All condition kinds share the HCondition location rules.
  VisitCondition(comp);
}
1075
void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  // Code generation is shared across all HCondition kinds.
  VisitCondition(comp);
}
1079
void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  // All condition kinds share the HCondition location rules.
  VisitCondition(comp);
}
1083
void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  // Code generation is shared across all HCondition kinds.
  VisitCondition(comp);
}
1087
void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  // All condition kinds share the HCondition location rules.
  VisitCondition(comp);
}
1091
void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  // Code generation is shared across all HCondition kinds.
  VisitCondition(comp);
}
1095
void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  // All condition kinds share the HCondition location rules.
  VisitCondition(comp);
}
1099
void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  // Code generation is shared across all HCondition kinds.
  VisitCondition(comp);
}
1103
void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  // All condition kinds share the HCondition location rules.
  VisitCondition(comp);
}
1107
void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  // Code generation is shared across all HCondition kinds.
  VisitCondition(comp);
}
1111
void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  // All condition kinds share the HCondition location rules.
  VisitCondition(comp);
}
1115
void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  // Code generation is shared across all HCondition kinds.
  VisitCondition(comp);
}
1119
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  // Locals need no operand or output locations.
  local->SetLocations(nullptr);
}
1123
void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // No code is generated; locals are expected only in the entry block.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
1127
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  // Local loads need no operand or output locations.
  load->SetLocations(nullptr);
}
1131
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1136
1137void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1138  LocationSummary* locations =
1139      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1140  switch (store->InputAt(1)->GetType()) {
1141    case Primitive::kPrimBoolean:
1142    case Primitive::kPrimByte:
1143    case Primitive::kPrimChar:
1144    case Primitive::kPrimShort:
1145    case Primitive::kPrimInt:
1146    case Primitive::kPrimNot:
1147    case Primitive::kPrimFloat:
1148      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1149      break;
1150
1151    case Primitive::kPrimLong:
1152    case Primitive::kPrimDouble:
1153      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1154      break;
1155
1156    default:
1157      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1158  }
1159}
1160
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  // No code is generated; the input location already aliases the local's
  // stack slot.
  UNUSED(store);
}
1164
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  // Constants live in a constant location; they are materialized at use.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1170
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1175
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  // Constants live in a constant location; they are materialized at use.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1181
void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1186
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  // Constants live in a constant location; they are materialized at use.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1192
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1197
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  // Constants live in a constant location; they are materialized at use.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1203
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1208
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Constants live in a constant location; they are materialized at use.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1214
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1219
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Memory barriers need no operand or output locations.
  memory_barrier->SetLocations(nullptr);
}
1223
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Emit the barrier instruction(s) matching the requested barrier kind.
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1227
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  // A void return needs no operand or output locations.
  ret->SetLocations(nullptr);
}
1231
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  // Tear down the frame and return to the caller.
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1236
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  // The returned value must already sit in the calling convention's return
  // location for its type.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
1242
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  // The value is already in the return location; just tear down the frame.
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1247
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  // Prefer an intrinsic implementation when one is available; it sets up its
  // own locations.
  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}
1261
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  // The current ArtMethod* is spilled at a fixed offset from SP.
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
1266
1267static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1268  if (invoke->GetLocations()->Intrinsified()) {
1269    IntrinsicCodeGeneratorARM intrinsic(codegen);
1270    intrinsic.Dispatch(invoke);
1271    return true;
1272  }
1273  return false;
1274}
1275
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  // An intrinsic implementation, when available, replaces the call entirely.
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();

  // Emit the call and record a stack map at the call site.
  codegen_->GenerateStaticOrDirectCall(invoke, temp);
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1290
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  // Set up argument/return locations per the Dex calling convention.
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
1295
1296void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1297  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1298                                         codegen_->GetInstructionSetFeatures());
1299  if (intrinsic.TryDispatch(invoke)) {
1300    return;
1301  }
1302
1303  HandleInvoke(invoke);
1304}
1305
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  // An intrinsic implementation, when available, replaces the virtual call.
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above can fault on a null receiver; record it as the
  // implicit null check if applicable.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1336
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument (the dex method index, passed in R12).
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1342
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  // IMT slot for this interface method (modulo the fixed IMT size).
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above can fault on a null receiver; record it as the
  // implicit null check if applicable.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1375
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // Output must not overlap the input: the generated long-negation
      // sequence writes out.hi before it reads in.hi.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1401
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      // The D register overlapping the low S register of the pair.
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1448
1449void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1450  Primitive::Type result_type = conversion->GetResultType();
1451  Primitive::Type input_type = conversion->GetInputType();
1452  DCHECK_NE(result_type, input_type);
1453
1454  // The float-to-long and double-to-long type conversions rely on a
1455  // call to the runtime.
1456  LocationSummary::CallKind call_kind =
1457      ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1458       && result_type == Primitive::kPrimLong)
1459      ? LocationSummary::kCall
1460      : LocationSummary::kNoCall;
1461  LocationSummary* locations =
1462      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1463
1464  // The Java language does not allow treating boolean as an integral type but
1465  // our bit representation makes it safe.
1466
1467  switch (result_type) {
1468    case Primitive::kPrimByte:
1469      switch (input_type) {
1470        case Primitive::kPrimBoolean:
1471          // Boolean input is a result of code transformations.
1472        case Primitive::kPrimShort:
1473        case Primitive::kPrimInt:
1474        case Primitive::kPrimChar:
1475          // Processing a Dex `int-to-byte' instruction.
1476          locations->SetInAt(0, Location::RequiresRegister());
1477          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1478          break;
1479
1480        default:
1481          LOG(FATAL) << "Unexpected type conversion from " << input_type
1482                     << " to " << result_type;
1483      }
1484      break;
1485
1486    case Primitive::kPrimShort:
1487      switch (input_type) {
1488        case Primitive::kPrimBoolean:
1489          // Boolean input is a result of code transformations.
1490        case Primitive::kPrimByte:
1491        case Primitive::kPrimInt:
1492        case Primitive::kPrimChar:
1493          // Processing a Dex `int-to-short' instruction.
1494          locations->SetInAt(0, Location::RequiresRegister());
1495          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1496          break;
1497
1498        default:
1499          LOG(FATAL) << "Unexpected type conversion from " << input_type
1500                     << " to " << result_type;
1501      }
1502      break;
1503
1504    case Primitive::kPrimInt:
1505      switch (input_type) {
1506        case Primitive::kPrimLong:
1507          // Processing a Dex `long-to-int' instruction.
1508          locations->SetInAt(0, Location::Any());
1509          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1510          break;
1511
1512        case Primitive::kPrimFloat:
1513          // Processing a Dex `float-to-int' instruction.
1514          locations->SetInAt(0, Location::RequiresFpuRegister());
1515          locations->SetOut(Location::RequiresRegister());
1516          locations->AddTemp(Location::RequiresFpuRegister());
1517          break;
1518
1519        case Primitive::kPrimDouble:
1520          // Processing a Dex `double-to-int' instruction.
1521          locations->SetInAt(0, Location::RequiresFpuRegister());
1522          locations->SetOut(Location::RequiresRegister());
1523          locations->AddTemp(Location::RequiresFpuRegister());
1524          break;
1525
1526        default:
1527          LOG(FATAL) << "Unexpected type conversion from " << input_type
1528                     << " to " << result_type;
1529      }
1530      break;
1531
1532    case Primitive::kPrimLong:
1533      switch (input_type) {
1534        case Primitive::kPrimBoolean:
1535          // Boolean input is a result of code transformations.
1536        case Primitive::kPrimByte:
1537        case Primitive::kPrimShort:
1538        case Primitive::kPrimInt:
1539        case Primitive::kPrimChar:
1540          // Processing a Dex `int-to-long' instruction.
1541          locations->SetInAt(0, Location::RequiresRegister());
1542          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1543          break;
1544
1545        case Primitive::kPrimFloat: {
1546          // Processing a Dex `float-to-long' instruction.
1547          InvokeRuntimeCallingConvention calling_convention;
1548          locations->SetInAt(0, Location::FpuRegisterLocation(
1549              calling_convention.GetFpuRegisterAt(0)));
1550          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1551          break;
1552        }
1553
1554        case Primitive::kPrimDouble: {
1555          // Processing a Dex `double-to-long' instruction.
1556          InvokeRuntimeCallingConvention calling_convention;
1557          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1558              calling_convention.GetFpuRegisterAt(0),
1559              calling_convention.GetFpuRegisterAt(1)));
1560          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1561          break;
1562        }
1563
1564        default:
1565          LOG(FATAL) << "Unexpected type conversion from " << input_type
1566                     << " to " << result_type;
1567      }
1568      break;
1569
1570    case Primitive::kPrimChar:
1571      switch (input_type) {
1572        case Primitive::kPrimBoolean:
1573          // Boolean input is a result of code transformations.
1574        case Primitive::kPrimByte:
1575        case Primitive::kPrimShort:
1576        case Primitive::kPrimInt:
1577          // Processing a Dex `int-to-char' instruction.
1578          locations->SetInAt(0, Location::RequiresRegister());
1579          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1580          break;
1581
1582        default:
1583          LOG(FATAL) << "Unexpected type conversion from " << input_type
1584                     << " to " << result_type;
1585      }
1586      break;
1587
1588    case Primitive::kPrimFloat:
1589      switch (input_type) {
1590        case Primitive::kPrimBoolean:
1591          // Boolean input is a result of code transformations.
1592        case Primitive::kPrimByte:
1593        case Primitive::kPrimShort:
1594        case Primitive::kPrimInt:
1595        case Primitive::kPrimChar:
1596          // Processing a Dex `int-to-float' instruction.
1597          locations->SetInAt(0, Location::RequiresRegister());
1598          locations->SetOut(Location::RequiresFpuRegister());
1599          break;
1600
1601        case Primitive::kPrimLong:
1602          // Processing a Dex `long-to-float' instruction.
1603          locations->SetInAt(0, Location::RequiresRegister());
1604          locations->SetOut(Location::RequiresFpuRegister());
1605          locations->AddTemp(Location::RequiresRegister());
1606          locations->AddTemp(Location::RequiresRegister());
1607          locations->AddTemp(Location::RequiresFpuRegister());
1608          locations->AddTemp(Location::RequiresFpuRegister());
1609          break;
1610
1611        case Primitive::kPrimDouble:
1612          // Processing a Dex `double-to-float' instruction.
1613          locations->SetInAt(0, Location::RequiresFpuRegister());
1614          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1615          break;
1616
1617        default:
1618          LOG(FATAL) << "Unexpected type conversion from " << input_type
1619                     << " to " << result_type;
1620      };
1621      break;
1622
1623    case Primitive::kPrimDouble:
1624      switch (input_type) {
1625        case Primitive::kPrimBoolean:
1626          // Boolean input is a result of code transformations.
1627        case Primitive::kPrimByte:
1628        case Primitive::kPrimShort:
1629        case Primitive::kPrimInt:
1630        case Primitive::kPrimChar:
1631          // Processing a Dex `int-to-double' instruction.
1632          locations->SetInAt(0, Location::RequiresRegister());
1633          locations->SetOut(Location::RequiresFpuRegister());
1634          break;
1635
1636        case Primitive::kPrimLong:
1637          // Processing a Dex `long-to-double' instruction.
1638          locations->SetInAt(0, Location::RequiresRegister());
1639          locations->SetOut(Location::RequiresFpuRegister());
1640          locations->AddTemp(Location::RequiresRegister());
1641          locations->AddTemp(Location::RequiresRegister());
1642          locations->AddTemp(Location::RequiresFpuRegister());
1643          break;
1644
1645        case Primitive::kPrimFloat:
1646          // Processing a Dex `float-to-double' instruction.
1647          locations->SetInAt(0, Location::RequiresFpuRegister());
1648          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1649          break;
1650
1651        default:
1652          LOG(FATAL) << "Unexpected type conversion from " << input_type
1653                     << " to " << result_type;
1654      };
1655      break;
1656
1657    default:
1658      LOG(FATAL) << "Unexpected type conversion from " << input_type
1659                 << " to " << result_type;
1660  }
1661}
1662
// Emits the machine code for a primitive type conversion. The accepted
// (input_type, result_type) pairs, and the temporaries consumed here, mirror
// the LocationSummary built in LocationsBuilderARM::VisitTypeConversion; any
// unexpected pair aborts with LOG(FATAL).
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Signed bit-field extract: keep the low 8 bits, sign-extended.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Signed bit-field extract: keep the low 16 bits, sign-extended.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Truncation: only the low 32 bits of the long are kept, wherever
          // the input currently lives (register pair, stack slot or constant).
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            // Little-endian: the low word sits at the slot's base offset.
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FP temporary so the input register is preserved;
          // VCVT to integer rounds toward zero, as Java requires.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Same scheme as float-to-int, using a D-register temporary.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension: replicate the sign bit into the high word.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // Delegated to the pF2l runtime entry point.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          // Delegated to the pD2l runtime entry point.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Unsigned bit-field extract: keep the low 16 bits, zero-extended.
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister output = out.AsFpuRegister<SRegister>();
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp1_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp1_d = FromLowSToD(temp1_s);
          SRegister temp2_s = locations->GetTemp(3).AsFpuRegisterPairLow<SRegister>();
          DRegister temp2_d = FromLowSToD(temp2_s);

          // Operations use doubles for precision reasons (each 32-bit
          // half of a long fits in the 53-bit mantissa of a double,
          // but not in the 24-bit mantissa of a float).  This is
          // especially important for the low bits.  The result is
          // eventually converted to float.

          // temp1_d = int-to-double(high)
          __ vmovsr(temp1_s, high);
          __ vcvtdi(temp1_d, temp1_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp2_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into
          // `temp2_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp2_d, constant_low, constant_high);
          // temp1_d = temp1_d * 2^32
          __ vmuld(temp1_d, temp1_d, temp2_d);
          // temp2_d = unsigned-to-double(low)
          __ vmovsr(temp2_s, low);
          __ vcvtdu(temp2_d, temp2_s);
          // temp1_d = temp1_d + temp2_d
          __ vaddd(temp1_d, temp1_d, temp2_d);
          // output = double-to-float(temp1_d);
          __ vcvtsd(output, temp1_d);
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Same decomposition as long-to-float above, except the result is
          // built directly in the output D register (no final narrowing).
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);

          // out_d = int-to-double(high)
          __ vmovsr(out_s, high);
          __ vcvtdi(out_d, out_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into `temp_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp_d, constant_low, constant_high);
          // out_d = out_d * 2^32
          __ vmuld(out_d, out_d, temp_d);
          // temp_d = unsigned-to-double(low)
          __ vmovsr(temp_s, low);
          __ vcvtdu(temp_d, temp_s);
          // out_d = out_d + temp_d
          __ vaddd(out_d, out_d, temp_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1936
1937void LocationsBuilderARM::VisitAdd(HAdd* add) {
1938  LocationSummary* locations =
1939      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1940  switch (add->GetResultType()) {
1941    case Primitive::kPrimInt: {
1942      locations->SetInAt(0, Location::RequiresRegister());
1943      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1944      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1945      break;
1946    }
1947
1948    case Primitive::kPrimLong: {
1949      locations->SetInAt(0, Location::RequiresRegister());
1950      locations->SetInAt(1, Location::RequiresRegister());
1951      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1952      break;
1953    }
1954
1955    case Primitive::kPrimFloat:
1956    case Primitive::kPrimDouble: {
1957      locations->SetInAt(0, Location::RequiresFpuRegister());
1958      locations->SetInAt(1, Location::RequiresFpuRegister());
1959      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1960      break;
1961    }
1962
1963    default:
1964      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1965  }
1966}
1967
1968void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
1969  LocationSummary* locations = add->GetLocations();
1970  Location out = locations->Out();
1971  Location first = locations->InAt(0);
1972  Location second = locations->InAt(1);
1973  switch (add->GetResultType()) {
1974    case Primitive::kPrimInt:
1975      if (second.IsRegister()) {
1976        __ add(out.AsRegister<Register>(),
1977               first.AsRegister<Register>(),
1978               ShifterOperand(second.AsRegister<Register>()));
1979      } else {
1980        __ AddConstant(out.AsRegister<Register>(),
1981                       first.AsRegister<Register>(),
1982                       second.GetConstant()->AsIntConstant()->GetValue());
1983      }
1984      break;
1985
1986    case Primitive::kPrimLong: {
1987      DCHECK(second.IsRegisterPair());
1988      __ adds(out.AsRegisterPairLow<Register>(),
1989              first.AsRegisterPairLow<Register>(),
1990              ShifterOperand(second.AsRegisterPairLow<Register>()));
1991      __ adc(out.AsRegisterPairHigh<Register>(),
1992             first.AsRegisterPairHigh<Register>(),
1993             ShifterOperand(second.AsRegisterPairHigh<Register>()));
1994      break;
1995    }
1996
1997    case Primitive::kPrimFloat:
1998      __ vadds(out.AsFpuRegister<SRegister>(),
1999               first.AsFpuRegister<SRegister>(),
2000               second.AsFpuRegister<SRegister>());
2001      break;
2002
2003    case Primitive::kPrimDouble:
2004      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2005               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2006               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2007      break;
2008
2009    default:
2010      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2011  }
2012}
2013
2014void LocationsBuilderARM::VisitSub(HSub* sub) {
2015  LocationSummary* locations =
2016      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2017  switch (sub->GetResultType()) {
2018    case Primitive::kPrimInt: {
2019      locations->SetInAt(0, Location::RequiresRegister());
2020      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2021      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2022      break;
2023    }
2024
2025    case Primitive::kPrimLong: {
2026      locations->SetInAt(0, Location::RequiresRegister());
2027      locations->SetInAt(1, Location::RequiresRegister());
2028      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2029      break;
2030    }
2031    case Primitive::kPrimFloat:
2032    case Primitive::kPrimDouble: {
2033      locations->SetInAt(0, Location::RequiresFpuRegister());
2034      locations->SetInAt(1, Location::RequiresFpuRegister());
2035      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2036      break;
2037    }
2038    default:
2039      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2040  }
2041}
2042
2043void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2044  LocationSummary* locations = sub->GetLocations();
2045  Location out = locations->Out();
2046  Location first = locations->InAt(0);
2047  Location second = locations->InAt(1);
2048  switch (sub->GetResultType()) {
2049    case Primitive::kPrimInt: {
2050      if (second.IsRegister()) {
2051        __ sub(out.AsRegister<Register>(),
2052               first.AsRegister<Register>(),
2053               ShifterOperand(second.AsRegister<Register>()));
2054      } else {
2055        __ AddConstant(out.AsRegister<Register>(),
2056                       first.AsRegister<Register>(),
2057                       -second.GetConstant()->AsIntConstant()->GetValue());
2058      }
2059      break;
2060    }
2061
2062    case Primitive::kPrimLong: {
2063      DCHECK(second.IsRegisterPair());
2064      __ subs(out.AsRegisterPairLow<Register>(),
2065              first.AsRegisterPairLow<Register>(),
2066              ShifterOperand(second.AsRegisterPairLow<Register>()));
2067      __ sbc(out.AsRegisterPairHigh<Register>(),
2068             first.AsRegisterPairHigh<Register>(),
2069             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2070      break;
2071    }
2072
2073    case Primitive::kPrimFloat: {
2074      __ vsubs(out.AsFpuRegister<SRegister>(),
2075               first.AsFpuRegister<SRegister>(),
2076               second.AsFpuRegister<SRegister>());
2077      break;
2078    }
2079
2080    case Primitive::kPrimDouble: {
2081      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2082               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2083               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2084      break;
2085    }
2086
2087
2088    default:
2089      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2090  }
2091}
2092
2093void LocationsBuilderARM::VisitMul(HMul* mul) {
2094  LocationSummary* locations =
2095      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2096  switch (mul->GetResultType()) {
2097    case Primitive::kPrimInt:
2098    case Primitive::kPrimLong:  {
2099      locations->SetInAt(0, Location::RequiresRegister());
2100      locations->SetInAt(1, Location::RequiresRegister());
2101      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2102      break;
2103    }
2104
2105    case Primitive::kPrimFloat:
2106    case Primitive::kPrimDouble: {
2107      locations->SetInAt(0, Location::RequiresFpuRegister());
2108      locations->SetInAt(1, Location::RequiresFpuRegister());
2109      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2110      break;
2111    }
2112
2113    default:
2114      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2115  }
2116}
2117
// Emits the multiplication set up in LocationsBuilderARM::VisitMul.
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // The register allocator can form overlapping pairs such as R1_R2, so
      // `out` may alias parts of the inputs. The sequence below writes
      // out.hi (mla) before it reads in1.lo and in2.lo (umull), which is
      // wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP (the scratch register) <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      // umull also leaves the high half of that product in IP.
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2179
2180void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2181  DCHECK(instruction->IsDiv() || instruction->IsRem());
2182  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2183
2184  LocationSummary* locations = instruction->GetLocations();
2185  Location second = locations->InAt(1);
2186  DCHECK(second.IsConstant());
2187
2188  Register out = locations->Out().AsRegister<Register>();
2189  Register dividend = locations->InAt(0).AsRegister<Register>();
2190  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2191  DCHECK(imm == 1 || imm == -1);
2192
2193  if (instruction->IsRem()) {
2194    __ LoadImmediate(out, 0);
2195  } else {
2196    if (imm == 1) {
2197      __ Mov(out, dividend);
2198    } else {
2199      __ rsb(out, dividend, ShifterOperand(0));
2200    }
2201  }
2202}
2203
// Emits code for int division/remainder by a power-of-two constant. A plain
// arithmetic shift would round toward negative infinity, so a bias
// (abs_imm - 1 for negative dividends, 0 otherwise) is added first to make
// the result round toward zero, as Java requires.
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  // temp = bias: 0 for a non-negative dividend, abs_imm - 1 otherwise.
  if (ctz_imm == 1) {
    // abs_imm == 2: the bias is just the sign bit, one instruction suffices.
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    // Smear the sign bit, then keep its low ctz_imm copies.
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  // out = dividend + bias.
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    // Quotient: arithmetic shift of the biased value rounds toward zero.
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      // Negative divisor: negate the quotient.
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    // Remainder: low ctz_imm bits of the biased value, minus the bias.
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}
2238
// Emits code for int division/remainder by an arbitrary constant using the
// multiply-by-magic-number technique (see Hacker's Delight, chapter 10, and
// CalculateMagicAndShiftForDivRem): the quotient comes from the high half of
// dividend * magic, followed by sign-correction steps.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 = high 32 bits of dividend * magic; temp2 receives the (unused)
  // low 32 bits.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct for the case where the magic constant's sign differs from the
  // divisor's sign (the magic multiply is off by +/- dividend then).
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // quotient = temp1 + (temp1 >>> 31), i.e. add 1 for negative values so
    // the division rounds toward zero.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    // Finish the quotient in temp1, then remainder = dividend - quot * imm.
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2279
2280void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2281  DCHECK(instruction->IsDiv() || instruction->IsRem());
2282  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2283
2284  LocationSummary* locations = instruction->GetLocations();
2285  Location second = locations->InAt(1);
2286  DCHECK(second.IsConstant());
2287
2288  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2289  if (imm == 0) {
2290    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2291  } else if (imm == 1 || imm == -1) {
2292    DivRemOneOrMinusOne(instruction);
2293  } else if (IsPowerOfTwo(std::abs(imm))) {
2294    DivRemByPowerOfTwo(instruction);
2295  } else {
2296    DCHECK(imm <= -2 || imm >= 2);
2297    GenerateDivRemWithAnyConstant(instruction);
2298  }
2299}
2300
2301void LocationsBuilderARM::VisitDiv(HDiv* div) {
2302  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2303  if (div->GetResultType() == Primitive::kPrimLong) {
2304    // pLdiv runtime call.
2305    call_kind = LocationSummary::kCall;
2306  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2307    // sdiv will be replaced by other instruction sequence.
2308  } else if (div->GetResultType() == Primitive::kPrimInt &&
2309             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2310    // pIdivmod runtime call.
2311    call_kind = LocationSummary::kCall;
2312  }
2313
2314  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2315
2316  switch (div->GetResultType()) {
2317    case Primitive::kPrimInt: {
2318      if (div->InputAt(1)->IsConstant()) {
2319        locations->SetInAt(0, Location::RequiresRegister());
2320        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2321        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2322        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2323        if (abs_imm <= 1) {
2324          // No temp register required.
2325        } else {
2326          locations->AddTemp(Location::RequiresRegister());
2327          if (!IsPowerOfTwo(abs_imm)) {
2328            locations->AddTemp(Location::RequiresRegister());
2329          }
2330        }
2331      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2332        locations->SetInAt(0, Location::RequiresRegister());
2333        locations->SetInAt(1, Location::RequiresRegister());
2334        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2335      } else {
2336        InvokeRuntimeCallingConvention calling_convention;
2337        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2338        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2339        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2340        //       we only need the former.
2341        locations->SetOut(Location::RegisterLocation(R0));
2342      }
2343      break;
2344    }
2345    case Primitive::kPrimLong: {
2346      InvokeRuntimeCallingConvention calling_convention;
2347      locations->SetInAt(0, Location::RegisterPairLocation(
2348          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2349      locations->SetInAt(1, Location::RegisterPairLocation(
2350          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2351      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2352      break;
2353    }
2354    case Primitive::kPrimFloat:
2355    case Primitive::kPrimDouble: {
2356      locations->SetInAt(0, Location::RequiresFpuRegister());
2357      locations->SetInAt(1, Location::RequiresFpuRegister());
2358      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2359      break;
2360    }
2361
2362    default:
2363      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2364  }
2365}
2366
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  // Emits the division. The strategy here must mirror the one chosen in
  // LocationsBuilderARM::VisitDiv: constant divisors are strength-reduced,
  // ints use the hardware sdiv when available, and longs/no-divider ints
  // fall back to runtime calls whose register placement was fixed by the
  // locations builder (hence the DCHECKs below).
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        // Constant divisor: replaced by a shift/multiply sequence.
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // No hardware divider: the locations builder pinned the operands to
        // the runtime calling convention and the quotient to R0.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      // Long division always goes through the runtime; operands are in the
      // calling-convention register pairs, result in R0:R1.
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Doubles live in S-register pairs; FromLowSToD maps the pair to the
      // overlapping D register.
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2423
2424void LocationsBuilderARM::VisitRem(HRem* rem) {
2425  Primitive::Type type = rem->GetResultType();
2426
2427  // Most remainders are implemented in the runtime.
2428  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2429  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2430    // sdiv will be replaced by other instruction sequence.
2431    call_kind = LocationSummary::kNoCall;
2432  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2433             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2434    // Have hardware divide instruction for int, do it with three instructions.
2435    call_kind = LocationSummary::kNoCall;
2436  }
2437
2438  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2439
2440  switch (type) {
2441    case Primitive::kPrimInt: {
2442      if (rem->InputAt(1)->IsConstant()) {
2443        locations->SetInAt(0, Location::RequiresRegister());
2444        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2445        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2446        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2447        if (abs_imm <= 1) {
2448          // No temp register required.
2449        } else {
2450          locations->AddTemp(Location::RequiresRegister());
2451          if (!IsPowerOfTwo(abs_imm)) {
2452            locations->AddTemp(Location::RequiresRegister());
2453          }
2454        }
2455      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2456        locations->SetInAt(0, Location::RequiresRegister());
2457        locations->SetInAt(1, Location::RequiresRegister());
2458        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2459        locations->AddTemp(Location::RequiresRegister());
2460      } else {
2461        InvokeRuntimeCallingConvention calling_convention;
2462        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2463        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2464        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2465        //       we only need the latter.
2466        locations->SetOut(Location::RegisterLocation(R1));
2467      }
2468      break;
2469    }
2470    case Primitive::kPrimLong: {
2471      InvokeRuntimeCallingConvention calling_convention;
2472      locations->SetInAt(0, Location::RegisterPairLocation(
2473          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2474      locations->SetInAt(1, Location::RegisterPairLocation(
2475          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2476      // The runtime helper puts the output in R2,R3.
2477      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2478      break;
2479    }
2480    case Primitive::kPrimFloat: {
2481      InvokeRuntimeCallingConvention calling_convention;
2482      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2483      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2484      locations->SetOut(Location::FpuRegisterLocation(S0));
2485      break;
2486    }
2487
2488    case Primitive::kPrimDouble: {
2489      InvokeRuntimeCallingConvention calling_convention;
2490      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2491          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2492      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2493          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2494      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2495      break;
2496    }
2497
2498    default:
2499      LOG(FATAL) << "Unexpected rem type " << type;
2500  }
2501}
2502
2503void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2504  LocationSummary* locations = rem->GetLocations();
2505  Location out = locations->Out();
2506  Location first = locations->InAt(0);
2507  Location second = locations->InAt(1);
2508
2509  Primitive::Type type = rem->GetResultType();
2510  switch (type) {
2511    case Primitive::kPrimInt: {
2512        if (second.IsConstant()) {
2513          GenerateDivRemConstantIntegral(rem);
2514        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2515        Register reg1 = first.AsRegister<Register>();
2516        Register reg2 = second.AsRegister<Register>();
2517        Register temp = locations->GetTemp(0).AsRegister<Register>();
2518
2519        // temp = reg1 / reg2  (integer division)
2520        // temp = temp * reg2
2521        // dest = reg1 - temp
2522        __ sdiv(temp, reg1, reg2);
2523        __ mul(temp, temp, reg2);
2524        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2525      } else {
2526        InvokeRuntimeCallingConvention calling_convention;
2527        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2528        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2529        DCHECK_EQ(R1, out.AsRegister<Register>());
2530
2531        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2532      }
2533      break;
2534    }
2535
2536    case Primitive::kPrimLong: {
2537      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2538      break;
2539    }
2540
2541    case Primitive::kPrimFloat: {
2542      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2543      break;
2544    }
2545
2546    case Primitive::kPrimDouble: {
2547      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2548      break;
2549    }
2550
2551    default:
2552      LOG(FATAL) << "Unexpected rem type " << type;
2553  }
2554}
2555
2556void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2557  LocationSummary* locations =
2558      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2559  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2560  if (instruction->HasUses()) {
2561    locations->SetOut(Location::SameAsFirstInput());
2562  }
2563}
2564
2565void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2566  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2567  codegen_->AddSlowPath(slow_path);
2568
2569  LocationSummary* locations = instruction->GetLocations();
2570  Location value = locations->InAt(0);
2571
2572  switch (instruction->GetType()) {
2573    case Primitive::kPrimInt: {
2574      if (value.IsRegister()) {
2575        __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2576        __ b(slow_path->GetEntryLabel(), EQ);
2577      } else {
2578        DCHECK(value.IsConstant()) << value;
2579        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2580          __ b(slow_path->GetEntryLabel());
2581        }
2582      }
2583      break;
2584    }
2585    case Primitive::kPrimLong: {
2586      if (value.IsRegisterPair()) {
2587        __ orrs(IP,
2588                value.AsRegisterPairLow<Register>(),
2589                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2590        __ b(slow_path->GetEntryLabel(), EQ);
2591      } else {
2592        DCHECK(value.IsConstant()) << value;
2593        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2594          __ b(slow_path->GetEntryLabel());
2595        }
2596      }
2597      break;
2598    default:
2599      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2600    }
2601  }
2602}
2603
2604void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2605  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2606
2607  LocationSummary* locations =
2608      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2609
2610  switch (op->GetResultType()) {
2611    case Primitive::kPrimInt: {
2612      locations->SetInAt(0, Location::RequiresRegister());
2613      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2614      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2615      break;
2616    }
2617    case Primitive::kPrimLong: {
2618      locations->SetInAt(0, Location::RequiresRegister());
2619      locations->SetInAt(1, Location::RequiresRegister());
2620      locations->AddTemp(Location::RequiresRegister());
2621      locations->SetOut(Location::RequiresRegister());
2622      break;
2623    }
2624    default:
2625      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2626  }
2627}
2628
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  // Shared code generation for Shl/Shr/UShr. Int shifts mask the amount and
  // emit a single instruction; long shifts synthesize a 64-bit shift from
  // 32-bit operations using one temp register.
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // Note: this clobbers the shift-amount register with its masked value.
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // Shift the high part
        // Mask the amount to [0, 63] first (clobbers second_reg).
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        // (subs leaves amount-32 in temp; PL means the amount was >= 32).
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2725
// The three shift operations share their location selection and code
// generation through HandleShift, which dispatches on the instruction kind.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2749
2750void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
2751  LocationSummary* locations =
2752      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2753  InvokeRuntimeCallingConvention calling_convention;
2754  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2755  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2756  locations->SetOut(Location::RegisterLocation(R0));
2757}
2758
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  // Sets up the allocation entrypoint's arguments: the current ArtMethod* in
  // the second argument register and the type index in the first, then calls
  // the instruction's chosen entrypoint via its Thread offset.
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2768
2769void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
2770  LocationSummary* locations =
2771      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2772  InvokeRuntimeCallingConvention calling_convention;
2773  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2774  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2775  locations->SetOut(Location::RegisterLocation(R0));
2776  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2777}
2778
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  // Sets up the array-allocation entrypoint's arguments: the current
  // ArtMethod* in the third argument register and the type index in the
  // first (the array length input was pinned to the second argument register
  // by the locations builder), then calls the instruction's entrypoint.
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2788
2789void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
2790  LocationSummary* locations =
2791      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2792  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2793  if (location.IsStackSlot()) {
2794    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2795  } else if (location.IsDoubleStackSlot()) {
2796    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2797  }
2798  locations->SetOut(location);
2799}
2800
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
2805
2806void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
2807  LocationSummary* locations =
2808      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2809  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
2810}
2811
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
2815
2816void LocationsBuilderARM::VisitNot(HNot* not_) {
2817  LocationSummary* locations =
2818      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
2819  locations->SetInAt(0, Location::RequiresRegister());
2820  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2821}
2822
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  // Emits bitwise complement: a single mvn for ints, and one mvn per half
  // for longs.
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
2843
2844void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
2845  LocationSummary* locations =
2846      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
2847  locations->SetInAt(0, Location::RequiresRegister());
2848  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2849}
2850
2851void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
2852  LocationSummary* locations = bool_not->GetLocations();
2853  Location out = locations->Out();
2854  Location in = locations->InAt(0);
2855  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
2856}
2857
2858void LocationsBuilderARM::VisitCompare(HCompare* compare) {
2859  LocationSummary* locations =
2860      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
2861  switch (compare->InputAt(0)->GetType()) {
2862    case Primitive::kPrimLong: {
2863      locations->SetInAt(0, Location::RequiresRegister());
2864      locations->SetInAt(1, Location::RequiresRegister());
2865      // Output overlaps because it is written before doing the low comparison.
2866      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2867      break;
2868    }
2869    case Primitive::kPrimFloat:
2870    case Primitive::kPrimDouble: {
2871      locations->SetInAt(0, Location::RequiresFpuRegister());
2872      locations->SetInAt(1, Location::RequiresFpuRegister());
2873      locations->SetOut(Location::RequiresRegister());
2874      break;
2875    }
2876    default:
2877      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
2878  }
2879}
2880
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  // Emits the three-way compare: out = -1, 0 or 1. The type-specific part
  // only sets the condition flags (plus pre-loading 0 into out); the shared
  // tail after the switch materializes the result from those flags.
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  NearLabel less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // Compare high words first; only if equal do the flags from the low,
      // unsigned compare decide (hence the kOutputOverlap in the builder:
      // out is written before the low compare).
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // NaN operands set the V flag; the gt-bias decides which way they go.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: at this point out == 0 and the flags reflect the compare.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2929
2930void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2931  LocationSummary* locations =
2932      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2933  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2934    locations->SetInAt(i, Location::Any());
2935  }
2936  locations->SetOut(Location::Any());
2937}
2938
2939void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
2940  UNUSED(instruction);
2941  LOG(FATAL) << "Unreachable";
2942}
2943
void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
  // Emits a DMB with the option matching the requested barrier kind:
  // store-store barriers can use the cheaper ISHST; everything else gets a
  // full ISH.
  // TODO (ported from quick): revisit Arm barrier kinds
  DmbOptions flavour = DmbOptions::ISH;  // quiet c++ warnings
  switch (kind) {
    case MemBarrierKind::kAnyStore:
    case MemBarrierKind::kLoadAny:
    case MemBarrierKind::kAnyAny: {
      flavour = DmbOptions::ISH;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      flavour = DmbOptions::ISHST;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ dmb(flavour);
}
2963
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  // Atomically loads a 64-bit value from addr+offset into out_lo/out_hi
  // using ldrexd. out_lo doubles as a scratch register for the offset
  // computation before it is overwritten by the load.
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2975
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  // Atomically stores value_lo/value_hi to addr+offset with an
  // ldrexd/strexd retry loop. temp1 is used both as an offset scratch and as
  // the strexd status register; temp2 receives the discarded ldrexd result.
  NearLabel fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  // The ldrexd is the faulting instruction for an implicit null check.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  // strexd writes 0 on success; retry until the exclusive store succeeds.
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
2998
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  // Location selection shared by instance and static field stores. Input 0
  // is the object/class holding the field, input 1 the value. Temps are
  // reserved either for the write barrier (reference stores) or for the
  // ldrexd/strexd sequence (volatile wide stores on CPUs without atomic
  // ldrd/strd); the codegen side relies on this temp ordering.
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3039
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  // Code generation shared by instance and static field stores. Volatile
  // stores are bracketed by memory barriers; wide volatile stores on CPUs
  // without atomic ldrd/strd go through the ldrexd/strexd helper. Reference
  // stores additionally mark the GC card.
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    // Barrier before the store for volatile semantics.
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // 64-bit volatile store without atomic strd: use the strexd loop.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Move the double into core registers first (temps 0/1), then store
        // atomically via the strexd loop using temps 2/3 (R2/R3, reserved by
        // the locations builder).
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    // Mark the GC card for the stored reference (temps 0/1 per the builder).
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    // Barrier after the store for volatile semantics.
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3139
// Sets up the register constraints for an instance/static field load.
// Volatile wide (64-bit) fields need special treatment on cores without
// single-copy-atomic ldrd/strd: they are loaded with ldrexd via core registers.
void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  // A volatile double on a core lacking atomic ldrd/strd is loaded through two
  // core temps (see the codegen side, which uses GenerateWideAtomicLoad + vmovdrr).
  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // For a volatile long, the output register pair must not overlap the base input;
  // presumably the wide atomic load needs the base to stay live while writing the
  // output pair — TODO(review): confirm against GenerateWideAtomicLoad.
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}
3168
// Emits the load for an instance/static field get. The first emitted load may double
// as an implicit null check on `base`, which is why MaybeRecordImplicitNullCheck is
// carefully placed right after the (first) load instruction in each path.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      // A volatile long needs an atomic 64-bit load; without single-copy-atomic
      // ldrd/strd this is done via an ldrexd-based helper.
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Atomically load into two core temps, then move the pair into the D register.
        // The null check is recorded here (not in the common tail) because vmovdrr
        // follows the faulting load.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    // Volatile loads require load-acquire semantics: no later access may be
    // reordered before this load.
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3253
// Instance field stores share the generic field-set location logic.
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3257
// Instance field stores share the generic field-set code generation.
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3261
// Instance field loads share the generic field-get location logic.
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3265
// Instance field loads share the generic field-get code generation.
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3269
// Static field loads share the generic field-get location logic.
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3273
// Static field loads share the generic field-get code generation.
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3277
// Static field stores share the generic field-set location logic.
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3281
// Static field stores share the generic field-set code generation.
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3285
// A null check only needs its input in a register; when the checked reference has
// uses, its result aliases the input so no extra register is consumed.
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3294
// Implicit null check: if the check cannot be folded into a user's own memory
// access, emit a dummy word load from [obj + 0] into IP. A null obj makes the
// load fault, and the recorded PC lets the runtime map the fault back to this
// instruction to throw NullPointerException.
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3304
3305void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3306  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3307  codegen_->AddSlowPath(slow_path);
3308
3309  LocationSummary* locations = instruction->GetLocations();
3310  Location obj = locations->InAt(0);
3311
3312  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
3313  __ b(slow_path->GetEntryLabel(), EQ);
3314}
3315
// Dispatch between implicit (fault-based) and explicit (compare-and-branch)
// null checks based on compiler options.
void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}
3323
// Array loads need the array in a register and the index either as a constant
// (folded into the addressing offset) or in a register.
void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}
3335
// Emits an array element load. For a constant index the full offset
// (index << scale) + data_offset is folded into the load; for a register index
// the scaled address is first materialized in the scratch register IP.
// The load may double as an implicit null check (recorded after the switch).
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        // Byte-sized elements: no shift needed on the index.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded like plain 32-bit words (compressed-free heap refs).
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // The element load above may serve as the implicit null check on `obj`.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3462
// Array stores that need a dynamic type check are routed through the runtime
// (pAputObject), so their inputs go in the runtime calling-convention registers.
// Otherwise the store is emitted inline, with two temps when a GC write barrier
// is required for a reference store.
void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();

  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
  bool needs_runtime_call = instruction->NeedsTypeCheck();

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(value_type)) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }

    if (needs_write_barrier) {
      // Temporary registers for the write barrier.
      locations->AddTemp(Location::RequiresRegister());
      locations->AddTemp(Location::RequiresRegister());
    }
  }
}
3493
// Emits an array element store. Addressing mirrors VisitArrayGet: constant
// indices are folded into the offset; register indices are scaled into IP.
// Reference stores either get an inline store + GC card mark, or — when a type
// check is needed — a call to the pAputObject runtime entrypoint.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // Record here, directly after the store: the card-mark code below emits
        // more instructions, so the common tail would mis-attribute the fault PC.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // Type-checked reference store: delegate to the runtime, which performs
        // the check, the store, and the write barrier.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3618
// Array length: one register in (the array), one register out (the length).
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
3625
3626void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3627  LocationSummary* locations = instruction->GetLocations();
3628  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3629  Register obj = locations->InAt(0).AsRegister<Register>();
3630  Register out = locations->Out().AsRegister<Register>();
3631  __ LoadFromOffset(kLoadWord, out, obj, offset);
3632  codegen_->MaybeRecordImplicitNullCheck(instruction);
3633}
3634
// Bounds check needs index and length in registers; when the checked index has
// uses, its result aliases the first input so no extra register is consumed.
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3644
3645void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3646  LocationSummary* locations = instruction->GetLocations();
3647  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3648      instruction, locations->InAt(0), locations->InAt(1));
3649  codegen_->AddSlowPath(slow_path);
3650
3651  Register index = locations->InAt(0).AsRegister<Register>();
3652  Register length = locations->InAt(1).AsRegister<Register>();
3653
3654  __ cmp(index, ShifterOperand(length));
3655  __ b(slow_path->GetEntryLabel(), CS);
3656}
3657
// Marks the card-table entry covering `object` as dirty after a reference store,
// so the GC rescans it. When `can_be_null` and `value` is null, the mark is
// skipped (storing null creates no old-to-young reference).
// The byte stored is the low byte of the card-table base held in `card`; the
// runtime uses the base address itself as the dirty-card value, avoiding a
// separate immediate load.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  NearLabel is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // card = Thread::card_table_; temp = object >> kCardShift; card[temp] = card.
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3674
// Temporaries carry no locations of their own.
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}
3678
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}
3683
// Parallel moves are created by the register allocator, after the locations
// builder has run; this visitor must never be reached.
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3688
// Delegate emission of a parallel move to the move resolver.
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3692
// Suspend checks need no inputs/outputs but may jump to a slow path.
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3696
// Emits a suspend check unless another site already covers it: loop back edges
// emit their own check, and the entry block's check is folded into its goto.
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}
3710
// Emits the thread-flag test for a suspend check. The slow path is cached on the
// instruction so a loop header and its back edge(s) share one slow path. With a
// `successor` (back-edge form), the fast path branches to the successor and the
// slow path is entered by fall-through; without one, the slow path returns here.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Test Thread::Current()->threadFlags_: non-zero means suspension (or another
  // flag) was requested and the slow path must run.
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3739
// The move resolver emits through the code generator's assembler.
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  return codegen_->GetAssembler();
}
3743
// Emits one move of a parallel move: dispatches on the (source, destination)
// location kinds — core/FP registers and pairs, (double) stack slots, and
// constants — using IP (and DTMP for double slots) as scratch where a direct
// transfer is not possible.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    // Core register -> core register or stack slot.
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    // Stack slot -> core register, FP register, or stack slot (via IP).
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    // Single FP register -> FP register or stack slot.
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    // 64-bit stack slot -> 64-bit stack slot (via DTMP), core pair, or FP pair.
    if (destination.IsDoubleStackSlot()) {
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    // Core register pair -> core pair or 64-bit stack slot.
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    // FP register pair (D register) -> FP pair or 64-bit stack slot.
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant -> register(s) or stack slot(s), materialized as immediates
    // (64-bit values are split into low/high words; stack stores go via IP).
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Reinterpret the double's bits so it can be stored as two words.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
3858
// Swaps the contents of core register `reg` with the stack slot at SP + `mem`,
// using IP as the temporary. IP is reserved as a scratch register, so it is
// always safe to clobber here.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);  // Save the register value in IP.
  __ LoadFromOffset(kLoadWord, reg, SP, mem);  // reg <- [SP + mem].
  __ StoreToOffset(kStoreWord, IP, SP, mem);  // [SP + mem] <- old reg value.
}
3864
3865void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
3866  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
3867  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
3868  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
3869                    SP, mem1 + stack_offset);
3870  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
3871  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
3872                   SP, mem2 + stack_offset);
3873  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
3874}
3875
// Emits code that exchanges the contents of the source and destination
// locations of the move at `index`. IP (core) and DTMP (double-precision FPU)
// serve as the temporaries for the three-way swaps.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core register <-> core register, via IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // Single-precision FPU register <-> FPU register, via IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core register pair <-> core register pair, parking the source pair in DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core register pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    // The pair layout guarantees an even low register with high == low + 1.
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    // Park the pair in DTMP, load the slot into the pair, write DTMP to the slot.
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // Double-precision register <-> double-precision register, via DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // Double-precision register <-> double stack slot, via DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // Single-precision register <-> stack slot, via IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot: swap the two words separately.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3948
// Saves a scratch core register on the stack so the move resolver can reuse it.
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
3952
// Restores a scratch core register previously saved by SpillScratch.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
3956
3957void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3958  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3959      ? LocationSummary::kCallOnSlowPath
3960      : LocationSummary::kNoCall;
3961  LocationSummary* locations =
3962      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3963  locations->SetInAt(0, Location::RequiresRegister());
3964  locations->SetOut(Location::RequiresRegister());
3965}
3966
// Loads the requested class into the output register. The referrer's own
// class is read directly from the ArtMethod; any other class is read from the
// dex cache, with a slow path taken when the cache entry is null (and for the
// class-initialization check when required).
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    // out = current_method->declaring_class_
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    // out = dex_cache_resolved_types_[type_index]
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    // A null cache entry means the class is unresolved: go to the slow path.
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      // The clinit check binds the slow path's exit label itself.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
3996
3997void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
3998  LocationSummary* locations =
3999      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4000  locations->SetInAt(0, Location::RequiresRegister());
4001  if (check->HasUses()) {
4002    locations->SetOut(Location::SameAsFirstInput());
4003  }
4004}
4005
// Emits the class-initialization check for an explicit HClinitCheck,
// delegating the status comparison to GenerateClassInitializationCheck.
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  // NOTE(review): the trailing `true` mirrors the MustGenerateClinitCheck()
  // argument used in VisitLoadClass — presumably it requests initialization
  // in the slow path; verify against LoadClassSlowPathARM's constructor.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}
4014
// Emits the check that the class in `class_reg` is initialized: loads the
// class status word and branches to `slow_path` if it is below
// kStatusInitialized (signed LT comparison). Binds the slow path's exit label.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4025
4026void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4027  LocationSummary* locations =
4028      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4029  locations->SetInAt(0, Location::RequiresRegister());
4030  locations->SetOut(Location::RequiresRegister());
4031}
4032
// Loads the resolved string into the output register by walking
// current method -> declaring class -> dex cache strings -> entry at the
// string index. A null entry branches to the slow path for resolution.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  // out = declaring_class->dex_cache_strings_
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  // out = dex_cache_strings_[string_index]
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // Unresolved (null) entry: resolve via the slow path.
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
4048
4049void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4050  LocationSummary* locations =
4051      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4052  locations->SetOut(Location::RequiresRegister());
4053}
4054
// Loads the pending exception from the current thread (TR) into the output
// register, then clears the thread-local exception field by storing zero.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  __ LoadFromOffset(kLoadWord, out, TR, offset);  // out = thread->exception_.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);  // thread->exception_ = null.
}
4062
4063void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4064  LocationSummary* locations =
4065      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4066  InvokeRuntimeCallingConvention calling_convention;
4067  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4068}
4069
// Delivers the exception (already in the runtime calling convention's first
// argument register) via the pDeliverException entry point.
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4074
4075void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4076  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4077      ? LocationSummary::kNoCall
4078      : LocationSummary::kCallOnSlowPath;
4079  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4080  locations->SetInAt(0, Location::RequiresRegister());
4081  locations->SetInAt(1, Location::RequiresRegister());
4082  // The out register is used as a temporary, so it overlaps with the inputs.
4083  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4084}
4085
// Materializes `obj instanceof cls` as 0/1 in the output register. Null
// objects yield 0. For final classes a class-pointer comparison decides the
// result directly; otherwise a mismatch falls through to the type-check slow
// path.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  NearLabel done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // `zero` is only reachable from the branches emitted in these two cases.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4129
4130void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4131  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4132      instruction, LocationSummary::kCallOnSlowPath);
4133  locations->SetInAt(0, Location::RequiresRegister());
4134  locations->SetInAt(1, Location::RequiresRegister());
4135  locations->AddTemp(Location::RequiresRegister());
4136}
4137
// Emits the cast check for `obj` against `cls`. Null objects always pass;
// otherwise the object's class pointer is compared with `cls` and a mismatch
// branches to the type-check slow path.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  NearLabel done;
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &done);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
  if (instruction->MustDoNullCheck()) {
    __ Bind(&done);
  }
}
4163
4164void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4165  LocationSummary* locations =
4166      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4167  InvokeRuntimeCallingConvention calling_convention;
4168  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4169}
4170
4171void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4172  codegen_->InvokeRuntime(instruction->IsEnter()
4173        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4174      instruction,
4175      instruction->GetDexPc(),
4176      nullptr);
4177}
4178
// Location building for And/Or/Xor is identical and shared via
// HandleBitwiseOperation below.
void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4182
4183void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4184  LocationSummary* locations =
4185      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4186  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4187         || instruction->GetResultType() == Primitive::kPrimLong);
4188  locations->SetInAt(0, Location::RequiresRegister());
4189  locations->SetInAt(1, Location::RequiresRegister());
4190  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4191}
4192
// Code generation for And is shared with Or/Xor in HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  HandleBitwiseOperation(instruction);
}
4196
// Code generation for Or is shared with And/Xor in HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  HandleBitwiseOperation(instruction);
}
4200
// Code generation for Xor is shared with And/Or in HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  HandleBitwiseOperation(instruction);
}
4204
// Emits the and/orr/eor instruction(s) for a bitwise operation. Int values
// need a single instruction; long values live in core register pairs and are
// processed as two independent 32-bit halves.
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations = instruction->GetLocations();

  if (instruction->GetResultType() == Primitive::kPrimInt) {
    // 32-bit case: one instruction on the single registers.
    Register first = locations->InAt(0).AsRegister<Register>();
    Register second = locations->InAt(1).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();
    if (instruction->IsAnd()) {
      __ and_(out, first, ShifterOperand(second));
    } else if (instruction->IsOr()) {
      __ orr(out, first, ShifterOperand(second));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out, first, ShifterOperand(second));
    }
  } else {
    // 64-bit case: apply the operation to the low and high words separately.
    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
    Location first = locations->InAt(0);
    Location second = locations->InAt(1);
    Location out = locations->Out();
    if (instruction->IsAnd()) {
      __ and_(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ and_(out.AsRegisterPairHigh<Register>(),
              first.AsRegisterPairHigh<Register>(),
              ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else if (instruction->IsOr()) {
      __ orr(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ orr(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ eor(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    }
  }
}
4250
// Emits the call sequence for a static or direct invoke. `temp` must be
// kArtMethodRegister; it is clobbered with the callee ArtMethod* (or the
// string-init entry point) before the branch.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
  DCHECK_EQ(temp, kArtMethodRegister);

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, temp,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else {
    // temp = method;
    LoadCurrentMethod(temp);
    if (!invoke->IsRecursive()) {
      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
      // temp = temp[index_in_cache]
      __ LoadFromOffset(
          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
      // LR = temp[offset_of_quick_compiled_code]
      __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
    } else {
      // Recursive call: branch straight to this method's own frame entry.
      __ bl(GetFrameEntryLabel());
    }
  }

  DCHECK(!IsLeafMethod());
}
4292
// HBoundType carries type information only and must never survive to code
// generation.
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4298
// HBoundType carries type information only and must never survive to code
// generation.
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4304
4305}  // namespace arm
4306}  // namespace art
4307