code_generator_arm.cc revision e401d146407d61eeb99f8d6176b2ac13c4df1e33
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "gc/accounting/card_table.h"
24#include "intrinsics.h"
25#include "intrinsics_arm.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// Offset of the ArtMethod* within the frame; it is stored at SP + 0
// (see GenerateFrameEntry).
static constexpr int kCurrentMethodStackOffset = 0;
// Register holding the ArtMethod* on entry to a managed method.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Core callee-saves. PC is listed to mimic Quick's frame layout; LR is
// pushed in its place on entry (see GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
// Callee-save S registers (S16-S31).
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// In the slow path classes below, `__` relies on the `codegen` parameter of
// EmitNativeCode being in scope.
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
60
61class NullCheckSlowPathARM : public SlowPathCodeARM {
62 public:
63  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
64
65  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
66    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
67    __ Bind(GetEntryLabel());
68    arm_codegen->InvokeRuntime(
69        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
70  }
71
72 private:
73  HNullCheck* const instruction_;
74  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
75};
76
77class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
78 public:
79  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
80
81  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
82    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
83    __ Bind(GetEntryLabel());
84    arm_codegen->InvokeRuntime(
85        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
86  }
87
88 private:
89  HDivZeroCheck* const instruction_;
90  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
91};
92
93class SuspendCheckSlowPathARM : public SlowPathCodeARM {
94 public:
95  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
96      : instruction_(instruction), successor_(successor) {}
97
98  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
99    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
100    __ Bind(GetEntryLabel());
101    SaveLiveRegisters(codegen, instruction_->GetLocations());
102    arm_codegen->InvokeRuntime(
103        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
104    RestoreLiveRegisters(codegen, instruction_->GetLocations());
105    if (successor_ == nullptr) {
106      __ b(GetReturnLabel());
107    } else {
108      __ b(arm_codegen->GetLabelOf(successor_));
109    }
110  }
111
112  Label* GetReturnLabel() {
113    DCHECK(successor_ == nullptr);
114    return &return_label_;
115  }
116
117  HBasicBlock* GetSuccessor() const {
118    return successor_;
119  }
120
121 private:
122  HSuspendCheck* const instruction_;
123  // If not null, the block to branch to after the suspend check.
124  HBasicBlock* const successor_;
125
126  // If `successor_` is null, the label to branch to after the suspend check.
127  Label return_label_;
128
129  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
130};
131
132class BoundsCheckSlowPathARM : public SlowPathCodeARM {
133 public:
134  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
135                         Location index_location,
136                         Location length_location)
137      : instruction_(instruction),
138        index_location_(index_location),
139        length_location_(length_location) {}
140
141  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
142    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
143    __ Bind(GetEntryLabel());
144    // We're moving two locations to locations that could overlap, so we need a parallel
145    // move resolver.
146    InvokeRuntimeCallingConvention calling_convention;
147    codegen->EmitParallelMoves(
148        index_location_,
149        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
150        Primitive::kPrimInt,
151        length_location_,
152        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
153        Primitive::kPrimInt);
154    arm_codegen->InvokeRuntime(
155        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
156  }
157
158 private:
159  HBoundsCheck* const instruction_;
160  const Location index_location_;
161  const Location length_location_;
162
163  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
164};
165
166class LoadClassSlowPathARM : public SlowPathCodeARM {
167 public:
168  LoadClassSlowPathARM(HLoadClass* cls,
169                       HInstruction* at,
170                       uint32_t dex_pc,
171                       bool do_clinit)
172      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
173    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
174  }
175
176  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
177    LocationSummary* locations = at_->GetLocations();
178
179    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
180    __ Bind(GetEntryLabel());
181    SaveLiveRegisters(codegen, locations);
182
183    InvokeRuntimeCallingConvention calling_convention;
184    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
185    int32_t entry_point_offset = do_clinit_
186        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
187        : QUICK_ENTRY_POINT(pInitializeType);
188    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
189
190    // Move the class to the desired location.
191    Location out = locations->Out();
192    if (out.IsValid()) {
193      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
194      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
195    }
196    RestoreLiveRegisters(codegen, locations);
197    __ b(GetExitLabel());
198  }
199
200 private:
201  // The class this slow path will load.
202  HLoadClass* const cls_;
203
204  // The instruction where this slow path is happening.
205  // (Might be the load class or an initialization check).
206  HInstruction* const at_;
207
208  // The dex PC of `at_`.
209  const uint32_t dex_pc_;
210
211  // Whether to initialize the class.
212  const bool do_clinit_;
213
214  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
215};
216
217class LoadStringSlowPathARM : public SlowPathCodeARM {
218 public:
219  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
220
221  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
222    LocationSummary* locations = instruction_->GetLocations();
223    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
224
225    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
226    __ Bind(GetEntryLabel());
227    SaveLiveRegisters(codegen, locations);
228
229    InvokeRuntimeCallingConvention calling_convention;
230    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
231    arm_codegen->InvokeRuntime(
232        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
233    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
234
235    RestoreLiveRegisters(codegen, locations);
236    __ b(GetExitLabel());
237  }
238
239 private:
240  HLoadString* const instruction_;
241
242  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
243};
244
245class TypeCheckSlowPathARM : public SlowPathCodeARM {
246 public:
247  TypeCheckSlowPathARM(HInstruction* instruction,
248                       Location class_to_check,
249                       Location object_class,
250                       uint32_t dex_pc)
251      : instruction_(instruction),
252        class_to_check_(class_to_check),
253        object_class_(object_class),
254        dex_pc_(dex_pc) {}
255
256  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
257    LocationSummary* locations = instruction_->GetLocations();
258    DCHECK(instruction_->IsCheckCast()
259           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
260
261    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
262    __ Bind(GetEntryLabel());
263    SaveLiveRegisters(codegen, locations);
264
265    // We're moving two locations to locations that could overlap, so we need a parallel
266    // move resolver.
267    InvokeRuntimeCallingConvention calling_convention;
268    codegen->EmitParallelMoves(
269        class_to_check_,
270        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
271        Primitive::kPrimNot,
272        object_class_,
273        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
274        Primitive::kPrimNot);
275
276    if (instruction_->IsInstanceOf()) {
277      arm_codegen->InvokeRuntime(
278          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
279      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
280    } else {
281      DCHECK(instruction_->IsCheckCast());
282      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
283    }
284
285    RestoreLiveRegisters(codegen, locations);
286    __ b(GetExitLabel());
287  }
288
289 private:
290  HInstruction* const instruction_;
291  const Location class_to_check_;
292  const Location object_class_;
293  uint32_t dex_pc_;
294
295  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
296};
297
298class DeoptimizationSlowPathARM : public SlowPathCodeARM {
299 public:
300  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
301    : instruction_(instruction) {}
302
303  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
304    __ Bind(GetEntryLabel());
305    SaveLiveRegisters(codegen, instruction_->GetLocations());
306    DCHECK(instruction_->IsDeoptimize());
307    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
308    uint32_t dex_pc = deoptimize->GetDexPc();
309    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
310    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
311  }
312
313 private:
314  HInstruction* const instruction_;
315  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
316};
317
318#undef __
319
320#undef __
321#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
322
// Maps an HIR IfCondition to the corresponding ARM condition code.
inline Condition ARMCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return EQ;
    case kCondNE: return NE;
    case kCondLT: return LT;
    case kCondLE: return LE;
    case kCondGT: return GT;
    case kCondGE: return GE;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return EQ;        // Unreachable.
}
336
// Maps an HIR IfCondition to the ARM condition code of its logical negation
// (used to branch when the condition does NOT hold).
inline Condition ARMOppositeCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return NE;
    case kCondNE: return EQ;
    case kCondLT: return GE;
    case kCondLE: return GT;
    case kCondGT: return LE;
    case kCondGE: return LT;
    default:
      LOG(FATAL) << "Unknown if condition";
  }
  return EQ;        // Unreachable.
}
350
// Prints the name of core register `reg` for debug dumps.
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Register(reg);
}
354
// Prints the name of FP (single-precision) register `reg` for debug dumps.
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << SRegister(reg);
}
358
// Spills core register `reg_id` to the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
363
// Reloads core register `reg_id` from the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
368
// Spills S register `reg_id` to the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
373
// Reloads S register `reg_id` from the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
378
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    // Callee-save masks derived from the file-level arrays.
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(false /* can_relocate_branches */),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
400
// Picks a free register (or aligned register pair) for a value of `type`
// and marks it blocked, keeping the single-register and pair bookkeeping
// consistent in both directions.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      // Block both halves so single-register allocation cannot reuse them.
      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index
      // (together they form a D register).
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
451
452void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
453  // Don't allocate the dalvik style register pair passing.
454  blocked_register_pairs_[R1_R2] = true;
455
456  // Stack register, LR and PC are always reserved.
457  blocked_core_registers_[SP] = true;
458  blocked_core_registers_[LR] = true;
459  blocked_core_registers_[PC] = true;
460
461  // Reserve thread register.
462  blocked_core_registers_[TR] = true;
463
464  // Reserve temp register.
465  blocked_core_registers_[IP] = true;
466
467  if (is_baseline) {
468    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
469      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
470    }
471
472    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
473
474    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
475      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
476    }
477  }
478
479  UpdateBlockedPairRegisters();
480}
481
482void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
483  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
484    ArmManagedRegister current =
485        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
486    if (blocked_core_registers_[current.AsRegisterPairLow()]
487        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
488      blocked_register_pairs_[i] = true;
489    }
490  }
491}
492
// The instruction visitor shares the assembler owned by `codegen`.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
497
// Computes core_spill_mask_ / fpu_spill_mask_ from the registers the
// allocator actually used, intersected with the callee-save masks.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest set bits, making the
    // mask one contiguous run.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
518
// Maps an ARM core register to its DWARF register number for CFI output.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
522
// Maps an ARM S register to its DWARF register number for CFI output.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
526
// Emits the method prologue: optional implicit stack-overflow probe,
// callee-save spills (with CFI), frame allocation, and storing the
// ArtMethod* at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  // This code generator only supports implicit stack overflow checks.
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit check: load from one reserved-region below SP so a stack
    // overflow faults here; the dex pc is recorded for the fault handler.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), so a single
    // vpush starting at its lowest register covers the whole range.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame and spill the current method at SP + 0.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
560
// Emits the method epilogue, mirroring GenerateFrameEntry.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    // NOTE(review): entry uses kArmWordSize for this adjustment; presumably
    // kArmPointerSize has the same value on ARM — confirm.
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // core_spill_mask_ includes PC (see ComputeSpillMask / the constructor),
  // so this pop also performs the return.
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
580
// Binds the label of `block` to the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
584
// Returns the stack location backing a local for HLoadLocal: a double slot
// for 64-bit types, a single slot for 32-bit types. Sub-word and void types
// are not expected here.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}
608
// Assigns the location (register, register pair, FP register(s), or stack
// slot) of the next method-call argument of `type`, advancing the core, FP
// and stack cursors kept across successive calls.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      // Longs always consume two core slots and two stack slots.
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The pair must be two adjacent registers.
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // Floats first fill odd-index gaps left by double alignment; if there
      // is no pending gap (even float cursor), catch up to the double cursor.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at an even S-register index.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
683
684Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) {
685  switch (type) {
686    case Primitive::kPrimBoolean:
687    case Primitive::kPrimByte:
688    case Primitive::kPrimChar:
689    case Primitive::kPrimShort:
690    case Primitive::kPrimInt:
691    case Primitive::kPrimNot: {
692      return Location::RegisterLocation(R0);
693    }
694
695    case Primitive::kPrimFloat: {
696      return Location::FpuRegisterLocation(S0);
697    }
698
699    case Primitive::kPrimLong: {
700      return Location::RegisterPairLocation(R0, R1);
701    }
702
703    case Primitive::kPrimDouble: {
704      return Location::FpuRegisterPairLocation(S0, S1);
705    }
706
707    case Primitive::kPrimVoid:
708      return Location();
709  }
710  UNREACHABLE();
711}
712
// Moves a 32-bit value between any two locations (core register, S
// register, or stack slot). Stack-to-stack moves go through scratch
// register IP.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      // Stack-to-stack: bounce through IP.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
746
// Moves a 64-bit value between any two locations (core register pair, FP
// register pair, or double stack slot). FP-pair <-> core-pair moves are
// unimplemented.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // The halves of the two pairs may overlap; use the parallel move
      // resolver to sequence the two word moves safely.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // A word-pair load needs the even/adjacent layout checked here.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1 is an odd low register, so the pair cannot use a word-pair
        // store (see ExpectedPairLayout); store each half separately.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack: move the two words via the parallel move resolver.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
804
// Baseline-compiler move: materializes the value produced by `instruction`
// into `location` for use by `move_for`. The source may be the fixed
// current-method slot, the instruction's already-correct output location, a
// constant, a local's stack slot, a temporary, or the instruction's output.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current method lives in a fixed slot at the bottom of the frame.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // The value is already where it needs to be.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Materialize the constant directly into the target location.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Immediates cannot be stored to memory directly; go through IP.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store the two 32-bit halves through IP, one word at a time.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Loads read the local's stack slot; the type selects the move width.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    // Temporaries are spilled to (single or double) stack slots.
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // Otherwise read the instruction's own output, which is only valid if
    // `move_for` immediately follows it (modulo intervening temporaries).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
888
// Calls the quick runtime entry point at `entry_point_offset` (relative to
// the thread register TR) and records the call PC for stack maps.
// `slow_path` may be null for calls emitted on the fast path.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  // Load the entry point from the thread's entry point table and call it.
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only these instruction kinds (or instructions whose locations allow
  // calls, or non-leaf methods) may legitimately invoke the runtime.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
903
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  // An unconditional branch needs no operands and produces no value.
  got->SetLocations(nullptr);
}
907
908void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
909  HBasicBlock* successor = got->GetSuccessor();
910  DCHECK(!successor->IsExitBlock());
911
912  HBasicBlock* block = got->GetBlock();
913  HInstruction* previous = got->GetPrevious();
914
915  HLoopInformation* info = block->GetLoopInformation();
916  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
917    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
918    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
919    return;
920  }
921
922  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
923    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
924  }
925  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
926    __ b(codegen_->GetLabelOf(successor));
927  }
928}
929
void LocationsBuilderARM::VisitExit(HExit* exit) {
  // The exit instruction needs no operands and produces no value.
  exit->SetLocations(nullptr);
}
933
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  // No code is emitted for the exit block; frame teardown happens in the
  // return visitors (see VisitReturn / VisitReturnVoid).
  UNUSED(exit);
}
937
// Emits the control flow for `instruction`'s boolean input: branch to
// `true_target` when the condition holds, otherwise fall through to the
// final jump to `false_target`. `false_target` is null when the false
// successor is the fall-through block; `always_true_target` is null when
// the true successor is (it is only used for statically-true conditions).
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
      // Statically false: fall through to the unconditional jump below.
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        // Use an immediate CMP when the constant is encodable; otherwise
        // materialize the constant in the scratch register IP first.
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    // The false successor is not the fall-through block; jump to it.
    __ b(false_target);
  }
}
989
990void LocationsBuilderARM::VisitIf(HIf* if_instr) {
991  LocationSummary* locations =
992      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
993  HInstruction* cond = if_instr->InputAt(0);
994  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
995    locations->SetInAt(0, Location::RequiresRegister());
996  }
997}
998
999void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1000  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1001  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1002  Label* always_true_target = true_target;
1003  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1004                                if_instr->IfTrueSuccessor())) {
1005    always_true_target = nullptr;
1006  }
1007  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1008                                if_instr->IfFalseSuccessor())) {
1009    false_target = nullptr;
1010  }
1011  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1012}
1013
1014void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1015  LocationSummary* locations = new (GetGraph()->GetArena())
1016      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1017  HInstruction* cond = deoptimize->InputAt(0);
1018  DCHECK(cond->IsCondition());
1019  if (cond->AsCondition()->NeedsMaterialization()) {
1020    locations->SetInAt(0, Location::RequiresRegister());
1021  }
1022}
1023
1024void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1025  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1026      DeoptimizationSlowPathARM(deoptimize);
1027  codegen_->AddSlowPath(slow_path);
1028  Label* slow_path_entry = slow_path->GetEntryLabel();
1029  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1030}
1031
1032void LocationsBuilderARM::VisitCondition(HCondition* cond) {
1033  LocationSummary* locations =
1034      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
1035  locations->SetInAt(0, Location::RequiresRegister());
1036  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
1037  if (cond->NeedsMaterialization()) {
1038    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1039  }
1040}
1041
// Emits a compare and, when the condition is materialized, a 0/1 result.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  // Nothing to emit when all users branch on the flags directly (the
  // comparison is then emitted by GenerateTestAndBranch).
  if (!cond->NeedsMaterialization()) return;
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    // Use an immediate CMP when the constant is encodable; otherwise load
    // it into the scratch register IP and compare registers.
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // Materialize the flags as 0/1 using an IT (if-then-else) block:
  // MOV #1 under the condition, MOV #0 under its opposite.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}
1067
// All concrete comparison instructions (==, !=, <, <=, >, >=) share the
// generic HCondition handling in VisitCondition above.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1115
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  // Locals need no location summary; they are addressed via stack slots.
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // No code is generated; locals may only appear in the entry block.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
1123
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  // Loads need no location summary; the consumer's Move() reads the slot.
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1132
1133void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1134  LocationSummary* locations =
1135      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1136  switch (store->InputAt(1)->GetType()) {
1137    case Primitive::kPrimBoolean:
1138    case Primitive::kPrimByte:
1139    case Primitive::kPrimChar:
1140    case Primitive::kPrimShort:
1141    case Primitive::kPrimInt:
1142    case Primitive::kPrimNot:
1143    case Primitive::kPrimFloat:
1144      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1145      break;
1146
1147    case Primitive::kPrimLong:
1148    case Primitive::kPrimDouble:
1149      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1150      break;
1151
1152    default:
1153      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1154  }
1155}
1156
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  // No code: the locations builder pins the stored value to the local's
  // stack slot, so the value is already in place.
  UNUSED(store);
}
1160
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants get a constant location; users materialize them on demand.
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1171
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants get a constant location; users materialize them on demand.
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1182
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants get a constant location; users materialize them on demand.
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1193
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants get a constant location; users materialize them on demand.
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1204
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants get a constant location; users materialize them on demand.
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1215
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Barriers need no operands and produce no value.
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Emit the barrier matching the requested kind.
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1223
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  // A void return has no operands.
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  // Tear down the frame and return to the caller.
  codegen_->GenerateFrameExit();
}
1232
1233void LocationsBuilderARM::VisitReturn(HReturn* ret) {
1234  LocationSummary* locations =
1235      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
1236  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
1237}
1238
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  // The return value is already in its ABI location (the locations builder
  // pinned it there); only the frame exit needs to be emitted.
  codegen_->GenerateFrameExit();
}
1243
1244void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1245  // When we do not run baseline, explicit clinit checks triggered by static
1246  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1247  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1248
1249  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1250                                         codegen_->GetInstructionSetFeatures());
1251  if (intrinsic.TryDispatch(invoke)) {
1252    return;
1253  }
1254
1255  HandleInvoke(invoke);
1256}
1257
// Loads the method being compiled from its fixed slot at the bottom of the
// frame into `reg`.
void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
  DCHECK(RequiresCurrentMethod());
  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
1262
1263static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1264  if (invoke->GetLocations()->Intrinsified()) {
1265    IntrinsicCodeGeneratorARM intrinsic(codegen);
1266    intrinsic.Dispatch(invoke);
1267    return true;
1268  }
1269  return false;
1270}
1271
1272void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1273  // When we do not run baseline, explicit clinit checks triggered by static
1274  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1275  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1276
1277  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1278    return;
1279  }
1280
1281  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
1282
1283  codegen_->GenerateStaticOrDirectCall(invoke, temp);
1284  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1285}
1286
1287void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
1288  LocationSummary* locations =
1289      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
1290  locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
1291
1292  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
1293  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
1294    HInstruction* input = invoke->InputAt(i);
1295    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
1296  }
1297
1298  locations->SetOut(calling_convention_visitor.GetReturnLocation(invoke->GetType()));
1299}
1300
1301void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1302  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1303                                         codegen_->GetInstructionSetFeatures());
1304  if (intrinsic.TryDispatch(invoke)) {
1305    return;
1306  }
1307
1308  HandleInvoke(invoke);
1309}
1310
// Emits a virtual dispatch: load the receiver's class, index into the
// embedded vtable, and call through the method's quick entry point.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the (implicit) null check of the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1341
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument: R12 carries the dex method index (loaded by the
  // code generator in VisitInvokeInterface below).
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1347
// Emits an interface dispatch through the class's embedded IMT, passing the
// dex method index as a hidden argument in R12.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  // IMT slots are shared: the index is taken modulo the table size.
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the (implicit) null check of the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1380
1381void LocationsBuilderARM::VisitNeg(HNeg* neg) {
1382  LocationSummary* locations =
1383      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
1384  switch (neg->GetResultType()) {
1385    case Primitive::kPrimInt: {
1386      locations->SetInAt(0, Location::RequiresRegister());
1387      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1388      break;
1389    }
1390    case Primitive::kPrimLong: {
1391      locations->SetInAt(0, Location::RequiresRegister());
1392      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1393      break;
1394    }
1395
1396    case Primitive::kPrimFloat:
1397    case Primitive::kPrimDouble:
1398      locations->SetInAt(0, Location::RequiresFpuRegister());
1399      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1400      break;
1401
1402    default:
1403      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
1404  }
1405}
1406
// Emits arithmetic negation for int, long, float and double results.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1453
1454void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1455  Primitive::Type result_type = conversion->GetResultType();
1456  Primitive::Type input_type = conversion->GetInputType();
1457  DCHECK_NE(result_type, input_type);
1458
1459  // The float-to-long and double-to-long type conversions rely on a
1460  // call to the runtime.
1461  LocationSummary::CallKind call_kind =
1462      ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1463       && result_type == Primitive::kPrimLong)
1464      ? LocationSummary::kCall
1465      : LocationSummary::kNoCall;
1466  LocationSummary* locations =
1467      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1468
1469  // The Java language does not allow treating boolean as an integral type but
1470  // our bit representation makes it safe.
1471
1472  switch (result_type) {
1473    case Primitive::kPrimByte:
1474      switch (input_type) {
1475        case Primitive::kPrimBoolean:
1476          // Boolean input is a result of code transformations.
1477        case Primitive::kPrimShort:
1478        case Primitive::kPrimInt:
1479        case Primitive::kPrimChar:
1480          // Processing a Dex `int-to-byte' instruction.
1481          locations->SetInAt(0, Location::RequiresRegister());
1482          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1483          break;
1484
1485        default:
1486          LOG(FATAL) << "Unexpected type conversion from " << input_type
1487                     << " to " << result_type;
1488      }
1489      break;
1490
1491    case Primitive::kPrimShort:
1492      switch (input_type) {
1493        case Primitive::kPrimBoolean:
1494          // Boolean input is a result of code transformations.
1495        case Primitive::kPrimByte:
1496        case Primitive::kPrimInt:
1497        case Primitive::kPrimChar:
1498          // Processing a Dex `int-to-short' instruction.
1499          locations->SetInAt(0, Location::RequiresRegister());
1500          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1501          break;
1502
1503        default:
1504          LOG(FATAL) << "Unexpected type conversion from " << input_type
1505                     << " to " << result_type;
1506      }
1507      break;
1508
1509    case Primitive::kPrimInt:
1510      switch (input_type) {
1511        case Primitive::kPrimLong:
1512          // Processing a Dex `long-to-int' instruction.
1513          locations->SetInAt(0, Location::Any());
1514          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1515          break;
1516
1517        case Primitive::kPrimFloat:
1518          // Processing a Dex `float-to-int' instruction.
1519          locations->SetInAt(0, Location::RequiresFpuRegister());
1520          locations->SetOut(Location::RequiresRegister());
1521          locations->AddTemp(Location::RequiresFpuRegister());
1522          break;
1523
1524        case Primitive::kPrimDouble:
1525          // Processing a Dex `double-to-int' instruction.
1526          locations->SetInAt(0, Location::RequiresFpuRegister());
1527          locations->SetOut(Location::RequiresRegister());
1528          locations->AddTemp(Location::RequiresFpuRegister());
1529          break;
1530
1531        default:
1532          LOG(FATAL) << "Unexpected type conversion from " << input_type
1533                     << " to " << result_type;
1534      }
1535      break;
1536
1537    case Primitive::kPrimLong:
1538      switch (input_type) {
1539        case Primitive::kPrimBoolean:
1540          // Boolean input is a result of code transformations.
1541        case Primitive::kPrimByte:
1542        case Primitive::kPrimShort:
1543        case Primitive::kPrimInt:
1544        case Primitive::kPrimChar:
1545          // Processing a Dex `int-to-long' instruction.
1546          locations->SetInAt(0, Location::RequiresRegister());
1547          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1548          break;
1549
1550        case Primitive::kPrimFloat: {
1551          // Processing a Dex `float-to-long' instruction.
1552          InvokeRuntimeCallingConvention calling_convention;
1553          locations->SetInAt(0, Location::FpuRegisterLocation(
1554              calling_convention.GetFpuRegisterAt(0)));
1555          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1556          break;
1557        }
1558
1559        case Primitive::kPrimDouble: {
1560          // Processing a Dex `double-to-long' instruction.
1561          InvokeRuntimeCallingConvention calling_convention;
1562          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1563              calling_convention.GetFpuRegisterAt(0),
1564              calling_convention.GetFpuRegisterAt(1)));
1565          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1566          break;
1567        }
1568
1569        default:
1570          LOG(FATAL) << "Unexpected type conversion from " << input_type
1571                     << " to " << result_type;
1572      }
1573      break;
1574
1575    case Primitive::kPrimChar:
1576      switch (input_type) {
1577        case Primitive::kPrimBoolean:
1578          // Boolean input is a result of code transformations.
1579        case Primitive::kPrimByte:
1580        case Primitive::kPrimShort:
1581        case Primitive::kPrimInt:
1582          // Processing a Dex `int-to-char' instruction.
1583          locations->SetInAt(0, Location::RequiresRegister());
1584          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1585          break;
1586
1587        default:
1588          LOG(FATAL) << "Unexpected type conversion from " << input_type
1589                     << " to " << result_type;
1590      }
1591      break;
1592
1593    case Primitive::kPrimFloat:
1594      switch (input_type) {
1595        case Primitive::kPrimBoolean:
1596          // Boolean input is a result of code transformations.
1597        case Primitive::kPrimByte:
1598        case Primitive::kPrimShort:
1599        case Primitive::kPrimInt:
1600        case Primitive::kPrimChar:
1601          // Processing a Dex `int-to-float' instruction.
1602          locations->SetInAt(0, Location::RequiresRegister());
1603          locations->SetOut(Location::RequiresFpuRegister());
1604          break;
1605
1606        case Primitive::kPrimLong:
1607          // Processing a Dex `long-to-float' instruction.
1608          locations->SetInAt(0, Location::RequiresRegister());
1609          locations->SetOut(Location::RequiresFpuRegister());
1610          locations->AddTemp(Location::RequiresRegister());
1611          locations->AddTemp(Location::RequiresRegister());
1612          locations->AddTemp(Location::RequiresFpuRegister());
1613          locations->AddTemp(Location::RequiresFpuRegister());
1614          break;
1615
1616        case Primitive::kPrimDouble:
1617          // Processing a Dex `double-to-float' instruction.
1618          locations->SetInAt(0, Location::RequiresFpuRegister());
1619          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1620          break;
1621
1622        default:
1623          LOG(FATAL) << "Unexpected type conversion from " << input_type
1624                     << " to " << result_type;
1625      };
1626      break;
1627
1628    case Primitive::kPrimDouble:
1629      switch (input_type) {
1630        case Primitive::kPrimBoolean:
1631          // Boolean input is a result of code transformations.
1632        case Primitive::kPrimByte:
1633        case Primitive::kPrimShort:
1634        case Primitive::kPrimInt:
1635        case Primitive::kPrimChar:
1636          // Processing a Dex `int-to-double' instruction.
1637          locations->SetInAt(0, Location::RequiresRegister());
1638          locations->SetOut(Location::RequiresFpuRegister());
1639          break;
1640
1641        case Primitive::kPrimLong:
1642          // Processing a Dex `long-to-double' instruction.
1643          locations->SetInAt(0, Location::RequiresRegister());
1644          locations->SetOut(Location::RequiresFpuRegister());
1645          locations->AddTemp(Location::RequiresRegister());
1646          locations->AddTemp(Location::RequiresRegister());
1647          locations->AddTemp(Location::RequiresFpuRegister());
1648          break;
1649
1650        case Primitive::kPrimFloat:
1651          // Processing a Dex `float-to-double' instruction.
1652          locations->SetInAt(0, Location::RequiresFpuRegister());
1653          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1654          break;
1655
1656        default:
1657          LOG(FATAL) << "Unexpected type conversion from " << input_type
1658                     << " to " << result_type;
1659      };
1660      break;
1661
1662    default:
1663      LOG(FATAL) << "Unexpected type conversion from " << input_type
1664                 << " to " << result_type;
1665  }
1666}
1667
// Emits ARM code for an HTypeConversion.  Operand, result and temp locations
// were chosen by the matching LocationsBuilderARM visit; conversions with no
// inline sequence (float/double -> long) were set up there as runtime calls,
// so here they reduce to an InvokeRuntime.
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  // Identity conversions are not expected to reach code generation.
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // sbfx sign-extends the low 8 bits of the input into the output.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // sbfx sign-extends the low 16 bits of the input into the output.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // Truncation keeps only the low 32 bits, whichever form the 64-bit
          // input takes: register pair, stack slot, or constant.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FP scratch register, then move to the core output.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Convert in an FP scratch D register, then move the 32-bit result
          // (left in the scratch's low S register) to the core output.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // Lowered to a runtime call; the locations builder placed the input
          // and output in the calling-convention registers.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          // Lowered to a runtime call, same as float-to-long above.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // ubfx zero-extends the low 16 bits of the input into the output.
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          // Move the integer into the output S register, then convert in place.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister output = out.AsFpuRegister<SRegister>();
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp1_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp1_d = FromLowSToD(temp1_s);
          SRegister temp2_s = locations->GetTemp(3).AsFpuRegisterPairLow<SRegister>();
          DRegister temp2_d = FromLowSToD(temp2_s);

          // Operations use doubles for precision reasons (each 32-bit
          // half of a long fits in the 53-bit mantissa of a double,
          // but not in the 24-bit mantissa of a float).  This is
          // especially important for the low bits.  The result is
          // eventually converted to float.

          // temp1_d = int-to-double(high)
          __ vmovsr(temp1_s, high);
          __ vcvtdi(temp1_d, temp1_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp2_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into
          // `temp2_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp2_d, constant_low, constant_high);
          // temp1_d = temp1_d * 2^32
          __ vmuld(temp1_d, temp1_d, temp2_d);
          // temp2_d = unsigned-to-double(low)
          __ vmovsr(temp2_s, low);
          __ vcvtdu(temp2_d, temp2_s);
          // temp1_d = temp1_d + temp2_d
          __ vaddd(temp1_d, temp1_d, temp2_d);
          // output = double-to-float(temp1_d);
          __ vcvtsd(output, temp1_d);
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          // Move the integer into the output's low S register, then convert
          // in place into the full D register.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Same scheme as long-to-float above (high * 2^32 + unsigned low),
          // but the result stays in double precision, so only one FP temp
          // is needed besides the output register.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          Register constant_low = locations->GetTemp(0).AsRegister<Register>();
          Register constant_high = locations->GetTemp(1).AsRegister<Register>();
          SRegister temp_s = locations->GetTemp(2).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);

          // out_d = int-to-double(high)
          __ vmovsr(out_s, high);
          __ vcvtdi(out_d, out_s);
          // Using vmovd to load the `k2Pow32EncodingForDouble` constant
          // as an immediate value into `temp_d` does not work, as
          // this instruction only transfers 8 significant bits of its
          // immediate operand.  Instead, use two 32-bit core
          // registers to load `k2Pow32EncodingForDouble` into `temp_d`.
          __ LoadImmediate(constant_low, Low32Bits(k2Pow32EncodingForDouble));
          __ LoadImmediate(constant_high, High32Bits(k2Pow32EncodingForDouble));
          __ vmovdrr(temp_d, constant_low, constant_high);
          // out_d = out_d * 2^32
          __ vmuld(out_d, out_d, temp_d);
          // temp_d = unsigned-to-double(low)
          __ vmovsr(temp_s, low);
          __ vcvtdu(temp_d, temp_s);
          // out_d = out_d + temp_d
          __ vaddd(out_d, out_d, temp_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1941
1942void LocationsBuilderARM::VisitAdd(HAdd* add) {
1943  LocationSummary* locations =
1944      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1945  switch (add->GetResultType()) {
1946    case Primitive::kPrimInt: {
1947      locations->SetInAt(0, Location::RequiresRegister());
1948      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1949      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1950      break;
1951    }
1952
1953    case Primitive::kPrimLong: {
1954      locations->SetInAt(0, Location::RequiresRegister());
1955      locations->SetInAt(1, Location::RequiresRegister());
1956      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1957      break;
1958    }
1959
1960    case Primitive::kPrimFloat:
1961    case Primitive::kPrimDouble: {
1962      locations->SetInAt(0, Location::RequiresFpuRegister());
1963      locations->SetInAt(1, Location::RequiresFpuRegister());
1964      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1965      break;
1966    }
1967
1968    default:
1969      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1970  }
1971}
1972
1973void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
1974  LocationSummary* locations = add->GetLocations();
1975  Location out = locations->Out();
1976  Location first = locations->InAt(0);
1977  Location second = locations->InAt(1);
1978  switch (add->GetResultType()) {
1979    case Primitive::kPrimInt:
1980      if (second.IsRegister()) {
1981        __ add(out.AsRegister<Register>(),
1982               first.AsRegister<Register>(),
1983               ShifterOperand(second.AsRegister<Register>()));
1984      } else {
1985        __ AddConstant(out.AsRegister<Register>(),
1986                       first.AsRegister<Register>(),
1987                       second.GetConstant()->AsIntConstant()->GetValue());
1988      }
1989      break;
1990
1991    case Primitive::kPrimLong: {
1992      DCHECK(second.IsRegisterPair());
1993      __ adds(out.AsRegisterPairLow<Register>(),
1994              first.AsRegisterPairLow<Register>(),
1995              ShifterOperand(second.AsRegisterPairLow<Register>()));
1996      __ adc(out.AsRegisterPairHigh<Register>(),
1997             first.AsRegisterPairHigh<Register>(),
1998             ShifterOperand(second.AsRegisterPairHigh<Register>()));
1999      break;
2000    }
2001
2002    case Primitive::kPrimFloat:
2003      __ vadds(out.AsFpuRegister<SRegister>(),
2004               first.AsFpuRegister<SRegister>(),
2005               second.AsFpuRegister<SRegister>());
2006      break;
2007
2008    case Primitive::kPrimDouble:
2009      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2010               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2011               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2012      break;
2013
2014    default:
2015      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2016  }
2017}
2018
2019void LocationsBuilderARM::VisitSub(HSub* sub) {
2020  LocationSummary* locations =
2021      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2022  switch (sub->GetResultType()) {
2023    case Primitive::kPrimInt: {
2024      locations->SetInAt(0, Location::RequiresRegister());
2025      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2026      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2027      break;
2028    }
2029
2030    case Primitive::kPrimLong: {
2031      locations->SetInAt(0, Location::RequiresRegister());
2032      locations->SetInAt(1, Location::RequiresRegister());
2033      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2034      break;
2035    }
2036    case Primitive::kPrimFloat:
2037    case Primitive::kPrimDouble: {
2038      locations->SetInAt(0, Location::RequiresFpuRegister());
2039      locations->SetInAt(1, Location::RequiresFpuRegister());
2040      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2041      break;
2042    }
2043    default:
2044      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2045  }
2046}
2047
2048void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2049  LocationSummary* locations = sub->GetLocations();
2050  Location out = locations->Out();
2051  Location first = locations->InAt(0);
2052  Location second = locations->InAt(1);
2053  switch (sub->GetResultType()) {
2054    case Primitive::kPrimInt: {
2055      if (second.IsRegister()) {
2056        __ sub(out.AsRegister<Register>(),
2057               first.AsRegister<Register>(),
2058               ShifterOperand(second.AsRegister<Register>()));
2059      } else {
2060        __ AddConstant(out.AsRegister<Register>(),
2061                       first.AsRegister<Register>(),
2062                       -second.GetConstant()->AsIntConstant()->GetValue());
2063      }
2064      break;
2065    }
2066
2067    case Primitive::kPrimLong: {
2068      DCHECK(second.IsRegisterPair());
2069      __ subs(out.AsRegisterPairLow<Register>(),
2070              first.AsRegisterPairLow<Register>(),
2071              ShifterOperand(second.AsRegisterPairLow<Register>()));
2072      __ sbc(out.AsRegisterPairHigh<Register>(),
2073             first.AsRegisterPairHigh<Register>(),
2074             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2075      break;
2076    }
2077
2078    case Primitive::kPrimFloat: {
2079      __ vsubs(out.AsFpuRegister<SRegister>(),
2080               first.AsFpuRegister<SRegister>(),
2081               second.AsFpuRegister<SRegister>());
2082      break;
2083    }
2084
2085    case Primitive::kPrimDouble: {
2086      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2087               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2088               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2089      break;
2090    }
2091
2092
2093    default:
2094      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2095  }
2096}
2097
2098void LocationsBuilderARM::VisitMul(HMul* mul) {
2099  LocationSummary* locations =
2100      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2101  switch (mul->GetResultType()) {
2102    case Primitive::kPrimInt:
2103    case Primitive::kPrimLong:  {
2104      locations->SetInAt(0, Location::RequiresRegister());
2105      locations->SetInAt(1, Location::RequiresRegister());
2106      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2107      break;
2108    }
2109
2110    case Primitive::kPrimFloat:
2111    case Primitive::kPrimDouble: {
2112      locations->SetInAt(0, Location::RequiresFpuRegister());
2113      locations->SetInAt(1, Location::RequiresFpuRegister());
2114      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2115      break;
2116    }
2117
2118    default:
2119      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2120  }
2121}
2122
2123void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
2124  LocationSummary* locations = mul->GetLocations();
2125  Location out = locations->Out();
2126  Location first = locations->InAt(0);
2127  Location second = locations->InAt(1);
2128  switch (mul->GetResultType()) {
2129    case Primitive::kPrimInt: {
2130      __ mul(out.AsRegister<Register>(),
2131             first.AsRegister<Register>(),
2132             second.AsRegister<Register>());
2133      break;
2134    }
2135    case Primitive::kPrimLong: {
2136      Register out_hi = out.AsRegisterPairHigh<Register>();
2137      Register out_lo = out.AsRegisterPairLow<Register>();
2138      Register in1_hi = first.AsRegisterPairHigh<Register>();
2139      Register in1_lo = first.AsRegisterPairLow<Register>();
2140      Register in2_hi = second.AsRegisterPairHigh<Register>();
2141      Register in2_lo = second.AsRegisterPairLow<Register>();
2142
2143      // Extra checks to protect caused by the existence of R1_R2.
2144      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
2145      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
2146      DCHECK_NE(out_hi, in1_lo);
2147      DCHECK_NE(out_hi, in2_lo);
2148
2149      // input: in1 - 64 bits, in2 - 64 bits
2150      // output: out
2151      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
2152      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
2153      // parts: out.lo = (in1.lo * in2.lo)[31:0]
2154
2155      // IP <- in1.lo * in2.hi
2156      __ mul(IP, in1_lo, in2_hi);
2157      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
2158      __ mla(out_hi, in1_hi, in2_lo, IP);
2159      // out.lo <- (in1.lo * in2.lo)[31:0];
2160      __ umull(out_lo, IP, in1_lo, in2_lo);
2161      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
2162      __ add(out_hi, out_hi, ShifterOperand(IP));
2163      break;
2164    }
2165
2166    case Primitive::kPrimFloat: {
2167      __ vmuls(out.AsFpuRegister<SRegister>(),
2168               first.AsFpuRegister<SRegister>(),
2169               second.AsFpuRegister<SRegister>());
2170      break;
2171    }
2172
2173    case Primitive::kPrimDouble: {
2174      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2175               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2176               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2177      break;
2178    }
2179
2180    default:
2181      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2182  }
2183}
2184
2185void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2186  DCHECK(instruction->IsDiv() || instruction->IsRem());
2187  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2188
2189  LocationSummary* locations = instruction->GetLocations();
2190  Location second = locations->InAt(1);
2191  DCHECK(second.IsConstant());
2192
2193  Register out = locations->Out().AsRegister<Register>();
2194  Register dividend = locations->InAt(0).AsRegister<Register>();
2195  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2196  DCHECK(imm == 1 || imm == -1);
2197
2198  if (instruction->IsRem()) {
2199    __ LoadImmediate(out, 0);
2200  } else {
2201    if (imm == 1) {
2202      __ Mov(out, dividend);
2203    } else {
2204      __ rsb(out, dividend, ShifterOperand(0));
2205    }
2206  }
2207}
2208
// Emits code for an integer HDiv/HRem whose constant divisor is +/-2^k
// (k >= 1; divisors of +/-1 are handled by DivRemOneOrMinusOne).  Uses the
// standard bias trick: add `abs(divisor) - 1' to negative dividends so the
// arithmetic right shift rounds toward zero, as required for division.
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  // One core temp was reserved by LocationsBuilderARM::VisitDiv for this path.
  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  // NOTE(review): std::abs(INT32_MIN) is undefined behavior; this assumes a
  // divisor of INT32_MIN never reaches here -- confirm with the callers.
  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  // temp = (dividend < 0) ? abs_imm - 1 : 0.
  if (ctz_imm == 1) {
    // For abs_imm == 2 the bias is exactly the sign bit.
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    // Broadcast the sign bit, then keep its low ctz_imm bits.
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  // out = dividend + bias.
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    // Quotient: arithmetic shift of the biased value; negate if the divisor
    // is negative.
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    // Remainder: low ctz_imm bits of the biased value, minus the bias.
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}
2243
// Emits code for an integer HDiv/HRem with an arbitrary constant divisor
// (|divisor| >= 2, not a power of two), using multiplication by a "magic"
// reciprocal constant instead of a divide (cf. Hacker's Delight, ch. 10).
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  // Two core temps were reserved by LocationsBuilderARM::VisitDiv for this path.
  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 = high 32 bits of dividend * magic (temp2 gets the discarded low half).
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high half when the magic constant's sign differs from the
  // divisor's sign.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // quotient = temp1 + (temp1 < 0 ? 1 : 0), i.e. temp1 - (temp1 >> 31).
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // remainder = dividend - quotient * divisor.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2284
2285void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2286  DCHECK(instruction->IsDiv() || instruction->IsRem());
2287  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2288
2289  LocationSummary* locations = instruction->GetLocations();
2290  Location second = locations->InAt(1);
2291  DCHECK(second.IsConstant());
2292
2293  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2294  if (imm == 0) {
2295    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2296  } else if (imm == 1 || imm == -1) {
2297    DivRemOneOrMinusOne(instruction);
2298  } else if (IsPowerOfTwo(std::abs(imm))) {
2299    DivRemByPowerOfTwo(instruction);
2300  } else {
2301    DCHECK(imm <= -2 || imm >= 2);
2302    GenerateDivRemWithAnyConstant(instruction);
2303  }
2304}
2305
// Chooses locations for an HDiv.  Long division, and int division by a
// non-constant when the CPU has no sdiv instruction, are lowered to runtime
// calls, so their operands must live in the fixed calling-convention
// registers; everything else is generated inline.
void LocationsBuilderARM::VisitDiv(HDiv* div) {
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if (div->GetResultType() == Primitive::kPrimLong) {
    // pLdiv runtime call.
    call_kind = LocationSummary::kCall;
  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
    // sdiv will be replaced by other instruction sequence.
  } else if (div->GetResultType() == Primitive::kPrimInt &&
             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
    // pIdivmod runtime call.
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (div->InputAt(1)->IsConstant()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
        // NOTE(review): std::abs(INT32_MIN) is undefined behavior -- confirm a
        // divisor of INT32_MIN cannot reach this code.
        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
        if (abs_imm <= 1) {
          // No temp register required.
        } else {
          // One temp for the power-of-two sequence (DivRemByPowerOfTwo); a
          // second for the magic-number sequence (GenerateDivRemWithAnyConstant).
          locations->AddTemp(Location::RequiresRegister());
          if (!IsPowerOfTwo(abs_imm)) {
            locations->AddTemp(Location::RequiresRegister());
          }
        }
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RequiresRegister());
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      } else {
        // No sdiv: fall back to the pIdivmod runtime call, so pin the
        // operands and result to the calling-convention registers.
        InvokeRuntimeCallingConvention calling_convention;
        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
        //       we only need the former.
        locations->SetOut(Location::RegisterLocation(R0));
      }
      break;
    }
    case Primitive::kPrimLong: {
      // pLdiv runtime call: both 64-bit operands in the four argument
      // registers, result in R0:R1.
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
      locations->SetInAt(1, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
      locations->SetOut(Location::RegisterPairLocation(R0, R1));
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      // Floating-point division is generated inline in VFP registers.
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2371
// Emits code for an HDiv.
// - Int division by a constant divisor is expanded inline.
// - Int division with a register divisor uses the hardware sdiv when the CPU
//   has one, otherwise it calls the pIdivmod entrypoint (quotient comes back
//   in R0, as the DCHECKs below verify against the chosen locations).
// - Long division always calls the pLdiv entrypoint (result pair R0:R1).
// - Float/double use the VFP divide instructions directly.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // No hardware divide: fall back to the runtime. The location builder
        // already pinned the inputs/output to the calling convention; the
        // DCHECKs only re-validate that contract.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2428
2429void LocationsBuilderARM::VisitRem(HRem* rem) {
2430  Primitive::Type type = rem->GetResultType();
2431
2432  // Most remainders are implemented in the runtime.
2433  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2434  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2435    // sdiv will be replaced by other instruction sequence.
2436    call_kind = LocationSummary::kNoCall;
2437  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2438             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2439    // Have hardware divide instruction for int, do it with three instructions.
2440    call_kind = LocationSummary::kNoCall;
2441  }
2442
2443  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2444
2445  switch (type) {
2446    case Primitive::kPrimInt: {
2447      if (rem->InputAt(1)->IsConstant()) {
2448        locations->SetInAt(0, Location::RequiresRegister());
2449        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2450        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2451        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2452        if (abs_imm <= 1) {
2453          // No temp register required.
2454        } else {
2455          locations->AddTemp(Location::RequiresRegister());
2456          if (!IsPowerOfTwo(abs_imm)) {
2457            locations->AddTemp(Location::RequiresRegister());
2458          }
2459        }
2460      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2461        locations->SetInAt(0, Location::RequiresRegister());
2462        locations->SetInAt(1, Location::RequiresRegister());
2463        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2464        locations->AddTemp(Location::RequiresRegister());
2465      } else {
2466        InvokeRuntimeCallingConvention calling_convention;
2467        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2468        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2469        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2470        //       we only need the latter.
2471        locations->SetOut(Location::RegisterLocation(R1));
2472      }
2473      break;
2474    }
2475    case Primitive::kPrimLong: {
2476      InvokeRuntimeCallingConvention calling_convention;
2477      locations->SetInAt(0, Location::RegisterPairLocation(
2478          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2479      locations->SetInAt(1, Location::RegisterPairLocation(
2480          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2481      // The runtime helper puts the output in R2,R3.
2482      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2483      break;
2484    }
2485    case Primitive::kPrimFloat: {
2486      InvokeRuntimeCallingConvention calling_convention;
2487      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2488      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2489      locations->SetOut(Location::FpuRegisterLocation(S0));
2490      break;
2491    }
2492
2493    case Primitive::kPrimDouble: {
2494      InvokeRuntimeCallingConvention calling_convention;
2495      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2496          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2497      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2498          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2499      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2500      break;
2501    }
2502
2503    default:
2504      LOG(FATAL) << "Unexpected rem type " << type;
2505  }
2506}
2507
2508void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2509  LocationSummary* locations = rem->GetLocations();
2510  Location out = locations->Out();
2511  Location first = locations->InAt(0);
2512  Location second = locations->InAt(1);
2513
2514  Primitive::Type type = rem->GetResultType();
2515  switch (type) {
2516    case Primitive::kPrimInt: {
2517        if (second.IsConstant()) {
2518          GenerateDivRemConstantIntegral(rem);
2519        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2520        Register reg1 = first.AsRegister<Register>();
2521        Register reg2 = second.AsRegister<Register>();
2522        Register temp = locations->GetTemp(0).AsRegister<Register>();
2523
2524        // temp = reg1 / reg2  (integer division)
2525        // temp = temp * reg2
2526        // dest = reg1 - temp
2527        __ sdiv(temp, reg1, reg2);
2528        __ mul(temp, temp, reg2);
2529        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2530      } else {
2531        InvokeRuntimeCallingConvention calling_convention;
2532        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2533        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2534        DCHECK_EQ(R1, out.AsRegister<Register>());
2535
2536        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2537      }
2538      break;
2539    }
2540
2541    case Primitive::kPrimLong: {
2542      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2543      break;
2544    }
2545
2546    case Primitive::kPrimFloat: {
2547      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2548      break;
2549    }
2550
2551    case Primitive::kPrimDouble: {
2552      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2553      break;
2554    }
2555
2556    default:
2557      LOG(FATAL) << "Unexpected rem type " << type;
2558  }
2559}
2560
2561void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2562  LocationSummary* locations =
2563      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2564  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2565  if (instruction->HasUses()) {
2566    locations->SetOut(Location::SameAsFirstInput());
2567  }
2568}
2569
2570void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2571  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2572  codegen_->AddSlowPath(slow_path);
2573
2574  LocationSummary* locations = instruction->GetLocations();
2575  Location value = locations->InAt(0);
2576
2577  switch (instruction->GetType()) {
2578    case Primitive::kPrimInt: {
2579      if (value.IsRegister()) {
2580        __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2581        __ b(slow_path->GetEntryLabel(), EQ);
2582      } else {
2583        DCHECK(value.IsConstant()) << value;
2584        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2585          __ b(slow_path->GetEntryLabel());
2586        }
2587      }
2588      break;
2589    }
2590    case Primitive::kPrimLong: {
2591      if (value.IsRegisterPair()) {
2592        __ orrs(IP,
2593                value.AsRegisterPairLow<Register>(),
2594                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2595        __ b(slow_path->GetEntryLabel(), EQ);
2596      } else {
2597        DCHECK(value.IsConstant()) << value;
2598        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2599          __ b(slow_path->GetEntryLabel());
2600        }
2601      }
2602      break;
2603    default:
2604      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2605    }
2606  }
2607}
2608
2609void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2610  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2611
2612  LocationSummary* locations =
2613      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2614
2615  switch (op->GetResultType()) {
2616    case Primitive::kPrimInt: {
2617      locations->SetInAt(0, Location::RequiresRegister());
2618      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2619      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2620      break;
2621    }
2622    case Primitive::kPrimLong: {
2623      locations->SetInAt(0, Location::RequiresRegister());
2624      locations->SetInAt(1, Location::RequiresRegister());
2625      locations->AddTemp(Location::RequiresRegister());
2626      locations->SetOut(Location::RequiresRegister());
2627      break;
2628    }
2629    default:
2630      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2631  }
2632}
2633
// Emits code for a shift (Shl/Shr/UShr).
// Int shifts mask the amount to the architectural range themselves; long
// shifts are synthesized from two 32-bit shifts plus a conditional fix-up for
// amounts >= 32 (using an IT block, so flags from the preceding subs matter).
// NOTE(review): for register shift amounts the mask is applied in place,
// clobbering input register 1 — presumably the register allocator permits
// this here; confirm against the location summary conventions.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // Shift the high part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2730
// Shl location building is shared with the other shifts; see HandleShift.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2734
// Shl code generation is shared with the other shifts; see HandleShift.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2738
// Shr location building is shared with the other shifts; see HandleShift.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2742
// Shr code generation is shared with the other shifts; see HandleShift.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2746
// UShr location building is shared with the other shifts; see HandleShift.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2750
// UShr code generation is shared with the other shifts; see HandleShift.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2754
// Builds locations for HNewInstance: a runtime call. The first two
// runtime-call argument registers are reserved as temps (they are filled with
// the type index and the current method in the code generator); the allocated
// object is returned in R0.
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}
2763
// Calls the instruction's allocation entrypoint with arg0 = type index and
// arg1 = the current ArtMethod.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2773
// Builds locations for HNewArray: a runtime call. Argument registers 0 and 2
// are reserved as temps (type index and current method), the array length
// input goes in argument register 1, and the new array is returned in R0.
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(R0));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
2783
// Calls the instruction's array-allocation entrypoint with arg0 = type index
// and arg2 = the current ArtMethod (the length is already in arg1 per the
// location summary).
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2793
2794void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
2795  LocationSummary* locations =
2796      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2797  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2798  if (location.IsStackSlot()) {
2799    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2800  } else if (location.IsDoubleStackSlot()) {
2801    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2802  }
2803  locations->SetOut(location);
2804}
2805
// No code is emitted for a parameter value: the location builder already
// mapped it to where the calling convention placed it.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
2810
// The current ArtMethod is always available in the method register (R0 on
// ARM, see kMethodRegisterArgument), so the output is pinned there.
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
2816
// No code needed: the location builder pinned the output to the register that
// already holds the current method.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
2820
2821void LocationsBuilderARM::VisitNot(HNot* not_) {
2822  LocationSummary* locations =
2823      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
2824  locations->SetInAt(0, Location::RequiresRegister());
2825  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2826}
2827
2828void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
2829  LocationSummary* locations = not_->GetLocations();
2830  Location out = locations->Out();
2831  Location in = locations->InAt(0);
2832  switch (not_->GetResultType()) {
2833    case Primitive::kPrimInt:
2834      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
2835      break;
2836
2837    case Primitive::kPrimLong:
2838      __ mvn(out.AsRegisterPairLow<Register>(),
2839             ShifterOperand(in.AsRegisterPairLow<Register>()));
2840      __ mvn(out.AsRegisterPairHigh<Register>(),
2841             ShifterOperand(in.AsRegisterPairHigh<Register>()));
2842      break;
2843
2844    default:
2845      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
2846  }
2847}
2848
2849void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
2850  LocationSummary* locations =
2851      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
2852  locations->SetInAt(0, Location::RequiresRegister());
2853  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2854}
2855
2856void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
2857  LocationSummary* locations = bool_not->GetLocations();
2858  Location out = locations->Out();
2859  Location in = locations->InAt(0);
2860  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
2861}
2862
2863void LocationsBuilderARM::VisitCompare(HCompare* compare) {
2864  LocationSummary* locations =
2865      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
2866  switch (compare->InputAt(0)->GetType()) {
2867    case Primitive::kPrimLong: {
2868      locations->SetInAt(0, Location::RequiresRegister());
2869      locations->SetInAt(1, Location::RequiresRegister());
2870      // Output overlaps because it is written before doing the low comparison.
2871      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2872      break;
2873    }
2874    case Primitive::kPrimFloat:
2875    case Primitive::kPrimDouble: {
2876      locations->SetInAt(0, Location::RequiresFpuRegister());
2877      locations->SetInAt(1, Location::RequiresFpuRegister());
2878      locations->SetOut(Location::RequiresRegister());
2879      break;
2880    }
2881    default:
2882      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
2883  }
2884}
2885
// Emits code for an HCompare, producing -1/0/1 in `out`.
// Longs: signed compare of the high words decides unless they are equal, in
// which case an unsigned compare of the low words decides. Floats/doubles:
// VFP compare, with the NaN (unordered, VS) case routed per the gt-bias flag.
// The tail after the switch interprets the flags left by the last compare:
// EQ -> 0, CC (unsigned-less / FP-less) -> -1, otherwise falls into 1.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  NearLabel less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2934
2935void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2936  LocationSummary* locations =
2937      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2938  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2939    locations->SetInAt(i, Location::Any());
2940  }
2941  locations->SetOut(Location::Any());
2942}
2943
// Phis produce no code of their own; reaching this visitor at code-generation
// time indicates a compiler bug, hence the fatal log.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
2948
2949void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
2950  // TODO (ported from quick): revisit Arm barrier kinds
2951  DmbOptions flavour = DmbOptions::ISH;  // quiet c++ warnings
2952  switch (kind) {
2953    case MemBarrierKind::kAnyStore:
2954    case MemBarrierKind::kLoadAny:
2955    case MemBarrierKind::kAnyAny: {
2956      flavour = DmbOptions::ISH;
2957      break;
2958    }
2959    case MemBarrierKind::kStoreStore: {
2960      flavour = DmbOptions::ISHST;
2961      break;
2962    }
2963    default:
2964      LOG(FATAL) << "Unexpected memory barrier " << kind;
2965  }
2966  __ dmb(flavour);
2967}
2968
// Emits an atomic 64-bit load (ldrexd) of [addr + offset] into out_lo:out_hi.
// When an offset is needed, out_lo doubles as a scratch register to
// materialize it — it is free until the ldrexd overwrites it with the loaded
// low word, so no extra temp is required.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2980
// Emits an atomic 64-bit store of value_lo:value_hi to [addr + offset] using
// an ldrexd/strexd retry loop: strexd only succeeds while the exclusive
// monitor set by the most recent ldrexd is still held, so on failure (nonzero
// status in temp1) we branch back and retry.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  NearLabel fail;
  if (offset != 0) {
    // temp1 is free here; it is only clobbered inside the loop below.
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  // strexd writes 0 into temp1 on success; any other value means the
  // exclusive access was lost and the store must be retried.
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
3003
// Builds the location summary for an instance/static field store: object in a
// core register, value in a core or FPU register depending on the field type.
// Extra temps are reserved for the GC write barrier (reference stores) or for
// the ldrexd/strexd sequence used by wide volatile stores on CPUs without
// atomic ldrd/strd.
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3044
// Emits an instance/static field store.
// Volatile stores are bracketed by memory barriers (AnyStore before, AnyAny
// after). Wide (long/double) volatile stores on CPUs without atomic
// ldrd/strd go through the ldrexd/strexd loop in GenerateWideAtomicStore.
// Reference stores that need it are followed by a GC card mark.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Move the double into two core registers first: the exclusive
        // store pair only operates on core registers.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3144
3145void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
3146  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3147  LocationSummary* locations =
3148      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3149  locations->SetInAt(0, Location::RequiresRegister());
3150
3151  bool volatile_for_double = field_info.IsVolatile()
3152      && (field_info.GetFieldType() == Primitive::kPrimDouble)
3153      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3154  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);
3155
3156  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3157    locations->SetOut(Location::RequiresFpuRegister());
3158  } else {
3159    locations->SetOut(Location::RequiresRegister(),
3160                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
3161  }
3162  if (volatile_for_double) {
3163    // Arm encoding have some additional constraints for ldrexd/strexd:
3164    // - registers need to be consecutive
3165    // - the first register should be even but not R14.
3166    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3167    // enable Arm encoding.
3168    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3169    locations->AddTemp(Location::RequiresRegister());
3170    locations->AddTemp(Location::RequiresRegister());
3171  }
3172}
3173
// Emits the code for an instance or static field load. InAt(0) holds the base
// object; the loaded value goes to the output location. Volatile 64-bit loads
// on cores without atomic ldrd/strd go through GenerateWideAtomicLoad so the
// read is single-copy atomic.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  // Pick the load instruction matching the field's size and signedness.
  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      // A volatile long needs an exclusive-load sequence unless the core's
      // plain ldrd is already atomic.
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into two core temps, then move to the FP pair.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    // Load-acquire ordering: later accesses may not be reordered before this
    // volatile load.
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3258
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Instance field stores share their location setup with static field stores.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3262
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Shared store emission; the nullability of the value controls whether the
  // GC-card marking can skip the null test.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3266
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Instance field loads share their location setup with static field loads.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3270
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Shared load emission for instance and static fields.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3274
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Static field loads share their location setup with instance field loads.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3278
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Shared load emission for instance and static fields.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3282
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Static field stores share their location setup with instance field stores.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3286
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Shared store emission; the nullability of the value controls whether the
  // GC-card marking can skip the null test.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3290
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    // The null check forwards its input, so keep the value in the same register.
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3299
3300void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
3301  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3302    return;
3303  }
3304  Location obj = instruction->GetLocations()->InAt(0);
3305
3306  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
3307  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3308}
3309
3310void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3311  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3312  codegen_->AddSlowPath(slow_path);
3313
3314  LocationSummary* locations = instruction->GetLocations();
3315  Location obj = locations->InAt(0);
3316
3317  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
3318  __ b(slow_path->GetEntryLabel(), EQ);
3319}
3320
3321void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3322  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3323    GenerateImplicitNullCheck(instruction);
3324  } else {
3325    GenerateExplicitNullCheck(instruction);
3326  }
3327}
3328
3329void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3330  LocationSummary* locations =
3331      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3332  locations->SetInAt(0, Location::RequiresRegister());
3333  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3334  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3335    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3336  } else {
3337    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3338  }
3339}
3340
// Emits an array element load. A constant index is folded into the memory
// offset; a register index is scaled by the element size and added to the
// array base via the scratch register IP.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        // Scale the index by the element size (LSL #1) while adding.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded like 32-bit ints (compressed-pointer-free heap).
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // The load above may serve as the implicit null check for the array.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3467
3468void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3469  Primitive::Type value_type = instruction->GetComponentType();
3470
3471  bool needs_write_barrier =
3472      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3473  bool needs_runtime_call = instruction->NeedsTypeCheck();
3474
3475  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3476      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3477  if (needs_runtime_call) {
3478    InvokeRuntimeCallingConvention calling_convention;
3479    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3480    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3481    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3482  } else {
3483    locations->SetInAt(0, Location::RequiresRegister());
3484    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3485    if (Primitive::IsFloatingPointType(value_type)) {
3486      locations->SetInAt(2, Location::RequiresFpuRegister());
3487    } else {
3488      locations->SetInAt(2, Location::RequiresRegister());
3489    }
3490
3491    if (needs_write_barrier) {
3492      // Temporary registers for the write barrier.
3493      locations->AddTemp(Location::RequiresRegister());
3494      locations->AddTemp(Location::RequiresRegister());
3495    }
3496  }
3497}
3498
// Emits an array element store. Constant indices are folded into the offset;
// register indices are scaled and added to the array base via IP. Reference
// stores needing a type check call the pAputObject runtime entrypoint, which
// also performs the write barrier.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // Record the null check right after the store, so the faulting PC maps
        // back to this instruction.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // Type-checked reference store: delegate to the runtime.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3623
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
  // Array length is a simple word load: array in, length out.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
3630
3631void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3632  LocationSummary* locations = instruction->GetLocations();
3633  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3634  Register obj = locations->InAt(0).AsRegister<Register>();
3635  Register out = locations->Out().AsRegister<Register>();
3636  __ LoadFromOffset(kLoadWord, out, obj, offset);
3637  codegen_->MaybeRecordImplicitNullCheck(instruction);
3638}
3639
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    // The bounds check forwards its index input; keep it in the same register.
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3649
3650void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3651  LocationSummary* locations = instruction->GetLocations();
3652  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3653      instruction, locations->InAt(0), locations->InAt(1));
3654  codegen_->AddSlowPath(slow_path);
3655
3656  Register index = locations->InAt(0).AsRegister<Register>();
3657  Register length = locations->InAt(1).AsRegister<Register>();
3658
3659  __ cmp(index, ShifterOperand(length));
3660  __ b(slow_path->GetEntryLabel(), CS);
3661}
3662
// Marks the card-table entry covering `object` so the GC knows it may now
// reference `value`. When `can_be_null` is set, null stores skip the marking.
// `temp` and `card` are scratch registers.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  NearLabel is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // Load the card-table base from the current thread.
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  // Compute the card index of `object`.
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store the low byte of the card-table base into the card entry: a non-zero
  // byte that dirties the card without needing an extra immediate register.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3679
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // Temporaries have no location summary of their own.
  temp->SetLocations(nullptr);
}
3683
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}
3688
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  // Parallel moves are materialized by the code generator itself; the
  // locations builder must never see one.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3693
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Delegate to the parallel move resolver, which sequences the moves.
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3697
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // No inputs or outputs; the only call happens on the slow path.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3701
3702void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3703  HBasicBlock* block = instruction->GetBlock();
3704  if (block->GetLoopInformation() != nullptr) {
3705    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3706    // The back edge will generate the suspend check.
3707    return;
3708  }
3709  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3710    // The goto will generate the suspend check.
3711    return;
3712  }
3713  GenerateSuspendCheck(instruction, nullptr);
3714}
3715
// Emits the suspend check test: load the thread's flags and, if any are set,
// jump to a slow path that calls into the runtime. With a `successor`, the
// fast path branches there directly (back-edge form); without one, the slow
// path returns to the fall-through label.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // Reuse an already-created slow path for this instruction if there is one.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Non-zero thread flags mean a suspend/checkpoint request is pending.
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3744
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  // The resolver emits through the code generator's assembler.
  return codegen_->GetAssembler();
}
3748
// Emits the move at `index` of the resolved parallel move. Handles every
// supported source/destination combination: core registers and pairs, FP
// registers and pairs, (double) stack slots, and constants. Memory-to-memory
// moves go through the scratch registers IP (32-bit) and DTMP (64-bit).
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      // Stack-to-stack word move via IP.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsDoubleStackSlot()) {
      // Stack-to-stack 64-bit move via DTMP.
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      // ldrd requires an even-numbered, consecutive register pair.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      // strd requires an even-numbered, consecutive register pair.
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constants are materialized either directly into the destination register
    // or through IP for stack destinations.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Spill the two halves separately through IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Reinterpret the double's bits and spill the halves through IP.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
3863
// Swaps the contents of core register `reg` with the stack slot at SP + `mem`.
// Uses IP as the scratch register.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);                             // IP = reg.
  __ LoadFromOffset(kLoadWord, reg, SP, mem);  // reg = [SP + mem].
  __ StoreToOffset(kStoreWord, IP, SP, mem);   // [SP + mem] = old reg (IP).
}
3869
// Swaps the contents of the two stack slots at SP + `mem1` and SP + `mem2`.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  // Two scratch registers are needed; IP is one, and the scope provides a
  // second, possibly by spilling one to the stack (see SpillScratch).
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  // If a register was spilled, it was pushed on the stack, so all SP-relative
  // offsets must be adjusted by one word.
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  // scratch = [mem1]; IP = [mem2]; [mem2] = scratch; [mem1] = IP.
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
3880
// Emits code swapping the values of `source` and `destination` for the
// parallel move at `index`, covering core/FP registers, register pairs and
// (double) stack slots. IP and DTMP serve as the scratch registers.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core <-> core: classic three-move swap through IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // Single-precision FP <-> FP, through IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core pair <-> core pair: park the source pair in DTMP during the swap.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    // `low_reg + 1` below relies on the pair being a contiguous even/odd pair.
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));  // DTMP = pair.
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);             // pair = [mem].
    __ StoreDToOffset(DTMP, SP, mem);                               // [mem] = DTMP.
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // Double FP <-> double FP, through DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // Double FP <-> double stack slot.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);              // DTMP = reg.
    __ LoadDFromOffset(reg, SP, mem); // reg = [mem].
    __ StoreDToOffset(DTMP, SP, mem); // [mem] = DTMP.
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // Single FP <-> stack slot, through IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot: swap word by word.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3953
3954void ParallelMoveResolverARM::SpillScratch(int reg) {
3955  __ Push(static_cast<Register>(reg));
3956}
3957
3958void ParallelMoveResolverARM::RestoreScratch(int reg) {
3959  __ Pop(static_cast<Register>(reg));
3960}
3961
3962void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3963  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3964      ? LocationSummary::kCallOnSlowPath
3965      : LocationSummary::kNoCall;
3966  LocationSummary* locations =
3967      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3968  locations->SetInAt(0, Location::RequiresRegister());
3969  locations->SetOut(Location::RequiresRegister());
3970}
3971
// Loads the class for `cls` into the output register. The referrer's own
// class comes straight from the ArtMethod; other classes go through the dex
// cache, with a slow path for unresolved classes and, when required, for
// class initialization.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    // out = current_method->declaring_class_
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    // out = out[type_index]
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    // A null cache entry means the class is unresolved: go to the slow path.
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      // Also goes to the slow path if the class is not initialized yet;
      // binds the slow path's exit label.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
4001
4002void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4003  LocationSummary* locations =
4004      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4005  locations->SetInAt(0, Location::RequiresRegister());
4006  if (check->HasUses()) {
4007    locations->SetOut(Location::SameAsFirstInput());
4008  }
4009}
4010
// Emits an initialization check for the class produced by the associated
// HLoadClass, with a slow path that runs the class initializer when needed.
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), /* do_clinit */ true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}
4019
// Branches to `slow_path` if the class in `class_reg` is not yet initialized,
// and binds the slow path's exit label. Clobbers IP.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  // Any status below kStatusInitialized means the class still needs
  // (or is undergoing) initialization.
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4030
4031void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4032  LocationSummary* locations =
4033      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4034  locations->SetInAt(0, Location::RequiresRegister());
4035  locations->SetOut(Location::RequiresRegister());
4036}
4037
// Loads the String at the instruction's string index from the declaring
// class's dex cache, going to a slow path when it is not resolved yet.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  // out = out->dex_cache_strings_
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  // out = out[string_index]
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // A null cache entry means the string is unresolved: go to the slow path.
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
4053
4054void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4055  LocationSummary* locations =
4056      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4057  locations->SetOut(Location::RequiresRegister());
4058}
4059
// Loads the pending exception from the current thread into the output
// register, then clears the thread's exception field by storing null.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  // out = Thread::Current()->exception_ (TR is the thread register).
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  // Clear the field: Thread::Current()->exception_ = null.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
4067
4068void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4069  LocationSummary* locations =
4070      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4071  InvokeRuntimeCallingConvention calling_convention;
4072  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4073}
4074
// Delegates the throw to the runtime; the exception object is in the first
// runtime-call argument register (set up in LocationsBuilderARM::VisitThrow).
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4079
4080void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4081  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4082      ? LocationSummary::kNoCall
4083      : LocationSummary::kCallOnSlowPath;
4084  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4085  locations->SetInAt(0, Location::RequiresRegister());
4086  locations->SetInAt(1, Location::RequiresRegister());
4087  // The out register is used as a temporary, so it overlaps with the inputs.
4088  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4089}
4090
// Materializes the result of `obj instanceof cls` (0 or 1) in the output
// register. Final classes need only an exact class comparison; others fall
// back to a slow path when the classes differ.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  NearLabel done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`. `out` doubles as a temporary here.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // `zero` is only branched to from the null check or from a failed
  // final-class comparison; only bind it when one of those exists.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4134
4135void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4136  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4137      instruction, LocationSummary::kCallOnSlowPath);
4138  locations->SetInAt(0, Location::RequiresRegister());
4139  locations->SetInAt(1, Location::RequiresRegister());
4140  locations->AddTemp(Location::RequiresRegister());
4141}
4142
// Checks that `obj` can be cast to `cls`: null objects and exact class
// matches pass on the fast path, everything else goes to the slow path,
// which performs the full check and throws on failure.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  NearLabel done;
  // A null object always passes the cast.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &done);
  }
  // Compare the class of `obj` with `cls`; on mismatch, the slow path does
  // the full type check.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
  if (instruction->MustDoNullCheck()) {
    __ Bind(&done);
  }
}
4168
4169void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4170  LocationSummary* locations =
4171      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4172  InvokeRuntimeCallingConvention calling_convention;
4173  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4174}
4175
// Monitor enter and exit are both runtime calls; only the entrypoint differs.
void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
      instruction,
      instruction->GetDexPc(),
      nullptr);
}
4183
4184void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
4185void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
4186void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4187
4188void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4189  LocationSummary* locations =
4190      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4191  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4192         || instruction->GetResultType() == Primitive::kPrimLong);
4193  locations->SetInAt(0, Location::RequiresRegister());
4194  locations->SetInAt(1, Location::RequiresRegister());
4195  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4196}
4197
// Code generation for And is shared with Or/Xor in HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  HandleBitwiseOperation(instruction);
}
4201
// Code generation for Or is shared with And/Xor in HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  HandleBitwiseOperation(instruction);
}
4205
// Code generation for Xor is shared with And/Or in HandleBitwiseOperation.
void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  HandleBitwiseOperation(instruction);
}
4209
// Emits code for And/Or/Xor. Int operands take a single instruction; long
// operands operate on the low and high register halves independently, which
// is valid because bitwise operations have no cross-word carries.
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations = instruction->GetLocations();

  if (instruction->GetResultType() == Primitive::kPrimInt) {
    Register first = locations->InAt(0).AsRegister<Register>();
    Register second = locations->InAt(1).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();
    if (instruction->IsAnd()) {
      __ and_(out, first, ShifterOperand(second));
    } else if (instruction->IsOr()) {
      __ orr(out, first, ShifterOperand(second));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out, first, ShifterOperand(second));
    }
  } else {
    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
    Location first = locations->InAt(0);
    Location second = locations->InAt(1);
    Location out = locations->Out();
    if (instruction->IsAnd()) {
      __ and_(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ and_(out.AsRegisterPairHigh<Register>(),
              first.AsRegisterPairHigh<Register>(),
              ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else if (instruction->IsOr()) {
      __ orr(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ orr(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ eor(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    }
  }
}
4255
// Emits the call sequence for a static or direct invoke. `temp` must be
// kArtMethodRegister; it is used to load the target ArtMethod. LR is
// clobbered by the call.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
  DCHECK_EQ(temp, kArtMethodRegister);

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    // String.<init> is rewritten to a call to a thread-local entrypoint.
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, temp,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else {
    // temp = method;
    LoadCurrentMethod(temp);
    if (!invoke->IsRecursive()) {
      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
      // temp = temp[index_in_cache]
      __ LoadFromOffset(
          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
      // LR = temp[offset_of_quick_compiled_code]
      __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
    } else {
      // Recursive call: branch directly to this method's own frame entry.
      __ bl(GetFrameEntryLabel());
    }
  }

  DCHECK(!IsLeafMethod());
}
4297
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  // Reaching this visitor therefore indicates a compiler bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4303
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  // Reaching this visitor therefore indicates a compiler bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4309
4310}  // namespace arm
4311}  // namespace art
4312