code_generator_arm.cc revision 9931f319cf86c56c2855d800339a3410697633a6
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "gc/accounting/card_table.h"
24#include "intrinsics.h"
25#include "intrinsics_arm.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// The current ArtMethod* is stored at SP + 0 (see GenerateFrameEntry).
static constexpr int kCurrentMethodStackOffset = 0;
// Register carrying the ArtMethod* on entry to a managed method.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Callee-save registers. PC is listed to mimic Quick's frame layout; at frame
// entry LR is pushed in its slot instead (see GenerateFrameEntry), so popping
// this set performs the method return.
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Inside the slow path classes below, `__` emits through the codegen argument's
// assembler; it is re-defined after the slow paths for the code generator itself.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
// Byte offset of a quick runtime entry point within the ARM Thread object.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
60
61class NullCheckSlowPathARM : public SlowPathCodeARM {
62 public:
63  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
64
65  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
66    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
67    __ Bind(GetEntryLabel());
68    arm_codegen->InvokeRuntime(
69        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
70  }
71
72  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
73
74 private:
75  HNullCheck* const instruction_;
76  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
77};
78
79class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
80 public:
81  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
82
83  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
84    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
85    __ Bind(GetEntryLabel());
86    arm_codegen->InvokeRuntime(
87        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
88  }
89
90  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
91
92 private:
93  HDivZeroCheck* const instruction_;
94  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
95};
96
// Slow path for HSuspendCheck: saves live registers, calls pTestSuspend, then
// either returns to the point of the check (`return_label_`) or jumps straight
// to a successor block.
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // pTestSuspend may trigger GC / thread suspension, so all live registers
    // must be spilled around the call.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      // Resume right after the suspend check.
      __ b(GetReturnLabel());
    } else {
      // Fused with a goto: continue directly at the successor block.
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  // Only valid when the slow path returns to the check site (no successor).
  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
137
// Slow path for HBoundsCheck: moves the offending index and the array length
// into the first two runtime-call argument registers and calls
// pThrowArrayBounds.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
                         Location index_location,
                         Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        length_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

 private:
  HBoundsCheck* const instruction_;
  // Where the out-of-range index currently lives.
  const Location index_location_;
  // Where the array length currently lives.
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
173
// Slow path that resolves (and optionally initializes) a class by calling
// pInitializeType or pInitializeStaticStorage, then moves the result from R0
// into the instruction's output location.
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the type index as the single runtime-call argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      // The output must not be a live register we just saved, or the move
      // below would be clobbered by RestoreLiveRegisters.
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
226
227class LoadStringSlowPathARM : public SlowPathCodeARM {
228 public:
229  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
230
231  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
232    LocationSummary* locations = instruction_->GetLocations();
233    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
234
235    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
236    __ Bind(GetEntryLabel());
237    SaveLiveRegisters(codegen, locations);
238
239    InvokeRuntimeCallingConvention calling_convention;
240    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
241    arm_codegen->InvokeRuntime(
242        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
243    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
244
245    RestoreLiveRegisters(codegen, locations);
246    __ b(GetExitLabel());
247  }
248
249  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }
250
251 private:
252  HLoadString* const instruction_;
253
254  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
255};
256
257class TypeCheckSlowPathARM : public SlowPathCodeARM {
258 public:
259  TypeCheckSlowPathARM(HInstruction* instruction,
260                       Location class_to_check,
261                       Location object_class,
262                       uint32_t dex_pc)
263      : instruction_(instruction),
264        class_to_check_(class_to_check),
265        object_class_(object_class),
266        dex_pc_(dex_pc) {}
267
268  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
269    LocationSummary* locations = instruction_->GetLocations();
270    DCHECK(instruction_->IsCheckCast()
271           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
272
273    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
274    __ Bind(GetEntryLabel());
275    SaveLiveRegisters(codegen, locations);
276
277    // We're moving two locations to locations that could overlap, so we need a parallel
278    // move resolver.
279    InvokeRuntimeCallingConvention calling_convention;
280    codegen->EmitParallelMoves(
281        class_to_check_,
282        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
283        Primitive::kPrimNot,
284        object_class_,
285        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
286        Primitive::kPrimNot);
287
288    if (instruction_->IsInstanceOf()) {
289      arm_codegen->InvokeRuntime(
290          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
291      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
292    } else {
293      DCHECK(instruction_->IsCheckCast());
294      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
295    }
296
297    RestoreLiveRegisters(codegen, locations);
298    __ b(GetExitLabel());
299  }
300
301  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }
302
303 private:
304  HInstruction* const instruction_;
305  const Location class_to_check_;
306  const Location object_class_;
307  uint32_t dex_pc_;
308
309  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
310};
311
312class DeoptimizationSlowPathARM : public SlowPathCodeARM {
313 public:
314  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
315    : instruction_(instruction) {}
316
317  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
318    __ Bind(GetEntryLabel());
319    SaveLiveRegisters(codegen, instruction_->GetLocations());
320    DCHECK(instruction_->IsDeoptimize());
321    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
322    uint32_t dex_pc = deoptimize->GetDexPc();
323    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
324    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
325  }
326
327  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
328
329 private:
330  HInstruction* const instruction_;
331  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
332};
333
#undef __
// From here on, `__` emits through the code generator's own assembler
// (GetAssembler()) rather than a slow path's codegen argument.
// (The previous revision had a redundant duplicate `#undef __` here.)
#define __ down_cast<ArmAssembler*>(GetAssembler())->
338
339inline Condition ARMCondition(IfCondition cond) {
340  switch (cond) {
341    case kCondEQ: return EQ;
342    case kCondNE: return NE;
343    case kCondLT: return LT;
344    case kCondLE: return LE;
345    case kCondGT: return GT;
346    case kCondGE: return GE;
347    default:
348      LOG(FATAL) << "Unknown if condition";
349  }
350  return EQ;        // Unreachable.
351}
352
353inline Condition ARMOppositeCondition(IfCondition cond) {
354  switch (cond) {
355    case kCondEQ: return NE;
356    case kCondNE: return EQ;
357    case kCondLT: return GE;
358    case kCondLE: return GT;
359    case kCondGT: return LE;
360    case kCondGE: return LT;
361    default:
362      LOG(FATAL) << "Unknown if condition";
363  }
364  return EQ;        // Unreachable.
365}
366
// Prints the symbolic name of core register `reg` (e.g. for debug dumps).
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Register(reg);
}
370
// Prints the symbolic name of single-precision FP register `reg`.
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << SRegister(reg);
}
374
// Spills core register `reg_id` to the stack slot at SP + `stack_index`.
// Returns the number of bytes used (one ARM word).
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
379
// Reloads core register `reg_id` from the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
384
// Spills single-precision FP register `reg_id` to SP + `stack_index`.
// Returns the number of bytes used (one ARM word).
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
389
// Reloads single-precision FP register `reg_id` from SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
394
// Builds the ARM code generator: passes register counts and the callee-save
// masks (computed from the constant arrays above) to the base class, then
// sets up the per-backend helpers.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    // The casts adapt the Register/SRegister arrays to the
                    // int-based mask helper.
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
416
// Finalizes assembly and patches up all recorded native PCs, which may have
// shifted when the assembler fixed up branches / emitted literal pools.
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust native pc offsets of block labels.
  for (size_t block_idx = 0u, end = block_order_->Size(); block_idx != end; ++block_idx) {
    HBasicBlock* block = block_order_->Get(block_idx);
    // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
    // FirstNonEmptyBlock() which could lead to adjusting a label more than once.
    DCHECK_LT(static_cast<size_t>(block->GetBlockId()), block_labels_.Size());
    Label* block_label = &block_labels_.GetRawStorage()[block->GetBlockId()];
    // Single-goto blocks are skipped during emission and thus never bound.
    DCHECK_EQ(block_label->IsBound(), !block->IsSingleGoto());
    if (block_label->IsBound()) {
      __ AdjustLabelPosition(block_label);
    }
  }

  CodeGenerator::Finalize(allocator);
}
442
// Picks a free register (or register pair) suitable for `type` and marks it —
// and any core pair overlapping it — as blocked.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      // Longs need a core register pair; block both halves individually too.
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index
      // (so they form a valid D register).
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
493
494void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
495  // Don't allocate the dalvik style register pair passing.
496  blocked_register_pairs_[R1_R2] = true;
497
498  // Stack register, LR and PC are always reserved.
499  blocked_core_registers_[SP] = true;
500  blocked_core_registers_[LR] = true;
501  blocked_core_registers_[PC] = true;
502
503  // Reserve thread register.
504  blocked_core_registers_[TR] = true;
505
506  // Reserve temp register.
507  blocked_core_registers_[IP] = true;
508
509  if (is_baseline) {
510    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
511      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
512    }
513
514    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
515
516    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
517      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
518    }
519  }
520
521  UpdateBlockedPairRegisters();
522}
523
524void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
525  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
526    ArmManagedRegister current =
527        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
528    if (blocked_core_registers_[current.AsRegisterPairLow()]
529        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
530      blocked_register_pairs_[i] = true;
531    }
532  }
533}
534
// Visitor that emits code for each HInstruction; shares the assembler owned by
// `codegen`.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
539
// Computes the core and FP spill masks from the registers actually allocated,
// then widens the FP mask into a contiguous range (vpush/vpop requirement).
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest set bits so the mask
    // describes one contiguous block of S registers.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
560
// DWARF register descriptor for an ARM core register (for CFI records).
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
564
// DWARF register descriptor for an ARM single-precision FP register.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
568
// Emits the method prologue: optional implicit stack-overflow probe, pushes of
// the core and FP callee-saves (with matching CFI records), frame allocation,
// and the store of the current ArtMethod* at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  // The probe can be skipped for leaf methods whose frame fits in the
  // reserved stack region.
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit check: load from SP minus the reserved bytes; the recorded PC
    // lets the runtime attribute a fault here to this method's entry.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), so one vpush
    // starting at its lowest register covers the whole range.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the remainder of the frame and store the current method at SP + 0.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
602
// Emits the method epilogue: frame deallocation, FP and core callee-save pops
// (with matching CFI records). Popping core_spill_mask_ loads the saved return
// address into PC (the mask contains PC, see kCoreCalleeSaves) and thereby
// returns.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  // Snapshot CFI state; it is restored after the pop so code emitted after
  // this exit still describes a live frame.
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    // The FP mask is contiguous (see ComputeSpillMask): one vpop suffices.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
622
// Binds the label of `block` at the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
626
627Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
628  switch (load->GetType()) {
629    case Primitive::kPrimLong:
630    case Primitive::kPrimDouble:
631      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
632
633    case Primitive::kPrimInt:
634    case Primitive::kPrimNot:
635    case Primitive::kPrimFloat:
636      return Location::StackSlot(GetStackSlot(load->GetLocal()));
637
638    case Primitive::kPrimBoolean:
639    case Primitive::kPrimByte:
640    case Primitive::kPrimChar:
641    case Primitive::kPrimShort:
642    case Primitive::kPrimVoid:
643      LOG(FATAL) << "Unexpected type " << load->GetType();
644      UNREACHABLE();
645  }
646
647  LOG(FATAL) << "Unreachable";
648  UNREACHABLE();
649}
650
// Assigns the location of the next argument of type `type`, advancing the
// visitor's register/stack cursors. Arguments overflow to the caller's
// outgoing-args area once the convention's registers are exhausted.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // 32-bit core values consume one GP register (or one stack slot).
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs consume two GP registers; the pair must start so that it maps
      // to a valid register pair (never straddling R1).
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      // Re-check: the skip above may have pushed the pair off the end.
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // On an even float cursor there are no single-register "holes" left by
      // doubles to back-fill, so catch up with the double cursor.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at the next even FP register at or past the float cursor.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
725
726Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
727  switch (type) {
728    case Primitive::kPrimBoolean:
729    case Primitive::kPrimByte:
730    case Primitive::kPrimChar:
731    case Primitive::kPrimShort:
732    case Primitive::kPrimInt:
733    case Primitive::kPrimNot: {
734      return Location::RegisterLocation(R0);
735    }
736
737    case Primitive::kPrimFloat: {
738      return Location::FpuRegisterLocation(S0);
739    }
740
741    case Primitive::kPrimLong: {
742      return Location::RegisterPairLocation(R0, R1);
743    }
744
745    case Primitive::kPrimDouble: {
746      return Location::FpuRegisterPairLocation(S0, S1);
747    }
748
749    case Primitive::kPrimVoid:
750      return Location();
751  }
752
753  UNREACHABLE();
754}
755
// The ArtMethod* is always passed in R0 (kMethodRegisterArgument).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
759
// Emits a 32-bit move between any combination of core register, S register
// and stack slot. Stack-to-stack moves go through the IP scratch register.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      // Stack-to-stack: bounce through IP.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
793
// Emits a 64-bit move between register pairs, FP register pairs and double
// stack slots. Overlapping pair-to-pair and slot-to-slot moves go through the
// parallel move resolver.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // The halves may overlap; let the parallel move resolver order them.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // ldrd needs an even/consecutive destination pair (see ExpectedPairLayout).
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1_R2 is not a valid strd pair base; store the words separately.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Slot-to-slot: move each word via the parallel move resolver.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
851
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  // Materializes the value produced by `instruction` into `location`, on
  // behalf of its user `move_for` (baseline code generation path).
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current ArtMethod* is spilled at a fixed offset in the frame.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // The value already lives in the requested location.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Constants are materialized here, at the use site.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Route the immediate through the IP scratch register to the stack.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store both 32-bit halves via the IP scratch register.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Loads come straight from the local's stack slot; the width (32 vs 64
    // bits) is chosen from the instruction's type.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: copy from the instruction's output location.
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
935
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  // Calls a runtime entry point: loads its address from the thread register
  // (TR) at `entry_point_offset` and branches with link through LR.
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  // Record a stack map immediately after the call so the runtime can walk
  // this call site.
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only instructions that legitimately call out (or explicit checks) may
  // reach the runtime from an otherwise-leaf method.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
950
951void LocationsBuilderARM::VisitGoto(HGoto* got) {
952  got->SetLocations(nullptr);
953}
954
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  // Emits the branch for an unconditional goto, folding in any suspend check
  // attached to a loop back edge and eliding branches to the next block.
  HBasicBlock* successor = got->GetSuccessor();
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Back edge with a suspend check: the suspend check itself branches to
    // the successor, so no separate jump is emitted here.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // A suspend check that immediately precedes the goto in the entry block is
  // emitted here, before the jump.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
976
977void LocationsBuilderARM::VisitExit(HExit* exit) {
978  exit->SetLocations(nullptr);
979}
980
981void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
982  UNUSED(exit);
983}
984
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  // Emits the compare-and-branch for `instruction`'s boolean input:
  // constant conditions become an unconditional branch (or nothing),
  // materialized conditions are compared against 0, and non-materialized
  // conditions emit their own compare here. A null false_target or
  // always_true_target means the corresponding successor falls through.
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          // The constant cannot be encoded as a shifter operand; stage it in
          // the IP scratch register.
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
1036
1037void LocationsBuilderARM::VisitIf(HIf* if_instr) {
1038  LocationSummary* locations =
1039      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
1040  HInstruction* cond = if_instr->InputAt(0);
1041  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1042    locations->SetInAt(0, Location::RequiresRegister());
1043  }
1044}
1045
1046void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1047  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1048  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1049  Label* always_true_target = true_target;
1050  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1051                                if_instr->IfTrueSuccessor())) {
1052    always_true_target = nullptr;
1053  }
1054  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1055                                if_instr->IfFalseSuccessor())) {
1056    false_target = nullptr;
1057  }
1058  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1059}
1060
1061void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1062  LocationSummary* locations = new (GetGraph()->GetArena())
1063      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1064  HInstruction* cond = deoptimize->InputAt(0);
1065  DCHECK(cond->IsCondition());
1066  if (cond->AsCondition()->NeedsMaterialization()) {
1067    locations->SetInAt(0, Location::RequiresRegister());
1068  }
1069}
1070
1071void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1072  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1073      DeoptimizationSlowPathARM(deoptimize);
1074  codegen_->AddSlowPath(slow_path);
1075  Label* slow_path_entry = slow_path->GetEntryLabel();
1076  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1077}
1078
1079void LocationsBuilderARM::VisitCondition(HCondition* cond) {
1080  LocationSummary* locations =
1081      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
1082  locations->SetInAt(0, Location::RequiresRegister());
1083  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
1084  if (cond->NeedsMaterialization()) {
1085    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1086  }
1087}
1088
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  // Materializes a comparison result as 0/1 in the output register. If the
  // condition is consumed directly by a branch, nothing is emitted here.
  if (!cond->NeedsMaterialization()) return;
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      // The constant cannot be encoded as a shifter operand; stage it in IP.
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // Select 0/1 with an IT (if-then-else) block: mov 1 on the condition,
  // mov 0 on its opposite.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}
1114
1115void LocationsBuilderARM::VisitEqual(HEqual* comp) {
1116  VisitCondition(comp);
1117}
1118
1119void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
1120  VisitCondition(comp);
1121}
1122
1123void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
1124  VisitCondition(comp);
1125}
1126
1127void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
1128  VisitCondition(comp);
1129}
1130
1131void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
1132  VisitCondition(comp);
1133}
1134
1135void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
1136  VisitCondition(comp);
1137}
1138
1139void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
1140  VisitCondition(comp);
1141}
1142
1143void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
1144  VisitCondition(comp);
1145}
1146
1147void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
1148  VisitCondition(comp);
1149}
1150
1151void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
1152  VisitCondition(comp);
1153}
1154
1155void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
1156  VisitCondition(comp);
1157}
1158
1159void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
1160  VisitCondition(comp);
1161}
1162
1163void LocationsBuilderARM::VisitLocal(HLocal* local) {
1164  local->SetLocations(nullptr);
1165}
1166
1167void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
1168  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
1169}
1170
1171void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
1172  load->SetLocations(nullptr);
1173}
1174
1175void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
1176  // Nothing to do, this is driven by the code generator.
1177  UNUSED(load);
1178}
1179
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  // Pins the stored value (input 1) directly to the local's stack slot, so
  // the store instruction itself emits no code. Slot width follows the type.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}
1203
1204void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
1205  UNUSED(store);
1206}
1207
1208void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
1209  LocationSummary* locations =
1210      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1211  locations->SetOut(Location::ConstantLocation(constant));
1212}
1213
1214void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
1215  // Will be generated at use site.
1216  UNUSED(constant);
1217}
1218
1219void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
1220  LocationSummary* locations =
1221      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1222  locations->SetOut(Location::ConstantLocation(constant));
1223}
1224
1225void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
1226  // Will be generated at use site.
1227  UNUSED(constant);
1228}
1229
1230void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
1231  LocationSummary* locations =
1232      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1233  locations->SetOut(Location::ConstantLocation(constant));
1234}
1235
1236void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
1237  // Will be generated at use site.
1238  UNUSED(constant);
1239}
1240
1241void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
1242  LocationSummary* locations =
1243      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1244  locations->SetOut(Location::ConstantLocation(constant));
1245}
1246
1247void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
1248  // Will be generated at use site.
1249  UNUSED(constant);
1250}
1251
1252void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
1253  LocationSummary* locations =
1254      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1255  locations->SetOut(Location::ConstantLocation(constant));
1256}
1257
1258void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
1259  // Will be generated at use site.
1260  UNUSED(constant);
1261}
1262
1263void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
1264  memory_barrier->SetLocations(nullptr);
1265}
1266
1267void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
1268  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
1269}
1270
1271void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
1272  ret->SetLocations(nullptr);
1273}
1274
1275void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
1276  UNUSED(ret);
1277  codegen_->GenerateFrameExit();
1278}
1279
1280void LocationsBuilderARM::VisitReturn(HReturn* ret) {
1281  LocationSummary* locations =
1282      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
1283  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
1284}
1285
1286void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
1287  UNUSED(ret);
1288  codegen_->GenerateFrameExit();
1289}
1290
1291void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1292  // When we do not run baseline, explicit clinit checks triggered by static
1293  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1294  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1295
1296  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1297                                         codegen_->GetInstructionSetFeatures());
1298  if (intrinsic.TryDispatch(invoke)) {
1299    return;
1300  }
1301
1302  HandleInvoke(invoke);
1303}
1304
1305static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1306  if (invoke->GetLocations()->Intrinsified()) {
1307    IntrinsicCodeGeneratorARM intrinsic(codegen);
1308    intrinsic.Dispatch(invoke);
1309    return true;
1310  }
1311  return false;
1312}
1313
1314void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1315  // When we do not run baseline, explicit clinit checks triggered by static
1316  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1317  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1318
1319  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1320    return;
1321  }
1322
1323  LocationSummary* locations = invoke->GetLocations();
1324  codegen_->GenerateStaticOrDirectCall(
1325      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
1326  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1327}
1328
1329void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
1330  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
1331  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
1332}
1333
1334void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1335  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1336                                         codegen_->GetInstructionSetFeatures());
1337  if (intrinsic.TryDispatch(invoke)) {
1338    return;
1339  }
1340
1341  HandleInvoke(invoke);
1342}
1343
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  // Emit an intrinsic expansion if available; otherwise dispatch through the
  // receiver class's embedded vtable.
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  // The class load above doubles as the implicit null check on the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1370
1371void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
1372  HandleInvoke(invoke);
1373  // Add the hidden argument.
1374  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
1375}
1376
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // Dispatches through the receiver class's embedded interface method table
  // (IMT), with the callee's dex method index passed as a hidden argument.
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument. (Temp 1 is R12, reserved by the locations
  // builder for this invoke.)
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load doubles as the implicit null check on the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1409
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  // Sets up input/output constraints for arithmetic negation per result type.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // The output must not alias the input: the long negation writes the
      // output pair before it has finished reading the input's high half.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1435
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  // Emits arithmetic negation (0 - in) for int, long, float and double.
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in, via reverse subtract.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1482
1483void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1484  Primitive::Type result_type = conversion->GetResultType();
1485  Primitive::Type input_type = conversion->GetInputType();
1486  DCHECK_NE(result_type, input_type);
1487
1488  // The float-to-long, double-to-long and long-to-float type conversions
1489  // rely on a call to the runtime.
1490  LocationSummary::CallKind call_kind =
1491      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1492        && result_type == Primitive::kPrimLong)
1493       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
1494      ? LocationSummary::kCall
1495      : LocationSummary::kNoCall;
1496  LocationSummary* locations =
1497      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1498
1499  // The Java language does not allow treating boolean as an integral type but
1500  // our bit representation makes it safe.
1501
1502  switch (result_type) {
1503    case Primitive::kPrimByte:
1504      switch (input_type) {
1505        case Primitive::kPrimBoolean:
1506          // Boolean input is a result of code transformations.
1507        case Primitive::kPrimShort:
1508        case Primitive::kPrimInt:
1509        case Primitive::kPrimChar:
1510          // Processing a Dex `int-to-byte' instruction.
1511          locations->SetInAt(0, Location::RequiresRegister());
1512          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1513          break;
1514
1515        default:
1516          LOG(FATAL) << "Unexpected type conversion from " << input_type
1517                     << " to " << result_type;
1518      }
1519      break;
1520
1521    case Primitive::kPrimShort:
1522      switch (input_type) {
1523        case Primitive::kPrimBoolean:
1524          // Boolean input is a result of code transformations.
1525        case Primitive::kPrimByte:
1526        case Primitive::kPrimInt:
1527        case Primitive::kPrimChar:
1528          // Processing a Dex `int-to-short' instruction.
1529          locations->SetInAt(0, Location::RequiresRegister());
1530          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1531          break;
1532
1533        default:
1534          LOG(FATAL) << "Unexpected type conversion from " << input_type
1535                     << " to " << result_type;
1536      }
1537      break;
1538
1539    case Primitive::kPrimInt:
1540      switch (input_type) {
1541        case Primitive::kPrimLong:
1542          // Processing a Dex `long-to-int' instruction.
1543          locations->SetInAt(0, Location::Any());
1544          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1545          break;
1546
1547        case Primitive::kPrimFloat:
1548          // Processing a Dex `float-to-int' instruction.
1549          locations->SetInAt(0, Location::RequiresFpuRegister());
1550          locations->SetOut(Location::RequiresRegister());
1551          locations->AddTemp(Location::RequiresFpuRegister());
1552          break;
1553
1554        case Primitive::kPrimDouble:
1555          // Processing a Dex `double-to-int' instruction.
1556          locations->SetInAt(0, Location::RequiresFpuRegister());
1557          locations->SetOut(Location::RequiresRegister());
1558          locations->AddTemp(Location::RequiresFpuRegister());
1559          break;
1560
1561        default:
1562          LOG(FATAL) << "Unexpected type conversion from " << input_type
1563                     << " to " << result_type;
1564      }
1565      break;
1566
1567    case Primitive::kPrimLong:
1568      switch (input_type) {
1569        case Primitive::kPrimBoolean:
1570          // Boolean input is a result of code transformations.
1571        case Primitive::kPrimByte:
1572        case Primitive::kPrimShort:
1573        case Primitive::kPrimInt:
1574        case Primitive::kPrimChar:
1575          // Processing a Dex `int-to-long' instruction.
1576          locations->SetInAt(0, Location::RequiresRegister());
1577          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1578          break;
1579
1580        case Primitive::kPrimFloat: {
1581          // Processing a Dex `float-to-long' instruction.
1582          InvokeRuntimeCallingConvention calling_convention;
1583          locations->SetInAt(0, Location::FpuRegisterLocation(
1584              calling_convention.GetFpuRegisterAt(0)));
1585          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1586          break;
1587        }
1588
1589        case Primitive::kPrimDouble: {
1590          // Processing a Dex `double-to-long' instruction.
1591          InvokeRuntimeCallingConvention calling_convention;
1592          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1593              calling_convention.GetFpuRegisterAt(0),
1594              calling_convention.GetFpuRegisterAt(1)));
1595          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1596          break;
1597        }
1598
1599        default:
1600          LOG(FATAL) << "Unexpected type conversion from " << input_type
1601                     << " to " << result_type;
1602      }
1603      break;
1604
1605    case Primitive::kPrimChar:
1606      switch (input_type) {
1607        case Primitive::kPrimBoolean:
1608          // Boolean input is a result of code transformations.
1609        case Primitive::kPrimByte:
1610        case Primitive::kPrimShort:
1611        case Primitive::kPrimInt:
1612          // Processing a Dex `int-to-char' instruction.
1613          locations->SetInAt(0, Location::RequiresRegister());
1614          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1615          break;
1616
1617        default:
1618          LOG(FATAL) << "Unexpected type conversion from " << input_type
1619                     << " to " << result_type;
1620      }
1621      break;
1622
1623    case Primitive::kPrimFloat:
1624      switch (input_type) {
1625        case Primitive::kPrimBoolean:
1626          // Boolean input is a result of code transformations.
1627        case Primitive::kPrimByte:
1628        case Primitive::kPrimShort:
1629        case Primitive::kPrimInt:
1630        case Primitive::kPrimChar:
1631          // Processing a Dex `int-to-float' instruction.
1632          locations->SetInAt(0, Location::RequiresRegister());
1633          locations->SetOut(Location::RequiresFpuRegister());
1634          break;
1635
1636        case Primitive::kPrimLong: {
1637          // Processing a Dex `long-to-float' instruction.
1638          InvokeRuntimeCallingConvention calling_convention;
1639          locations->SetInAt(0, Location::RegisterPairLocation(
1640              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
1641          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1642          break;
1643        }
1644
1645        case Primitive::kPrimDouble:
1646          // Processing a Dex `double-to-float' instruction.
1647          locations->SetInAt(0, Location::RequiresFpuRegister());
1648          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1649          break;
1650
1651        default:
1652          LOG(FATAL) << "Unexpected type conversion from " << input_type
1653                     << " to " << result_type;
1654      };
1655      break;
1656
1657    case Primitive::kPrimDouble:
1658      switch (input_type) {
1659        case Primitive::kPrimBoolean:
1660          // Boolean input is a result of code transformations.
1661        case Primitive::kPrimByte:
1662        case Primitive::kPrimShort:
1663        case Primitive::kPrimInt:
1664        case Primitive::kPrimChar:
1665          // Processing a Dex `int-to-double' instruction.
1666          locations->SetInAt(0, Location::RequiresRegister());
1667          locations->SetOut(Location::RequiresFpuRegister());
1668          break;
1669
1670        case Primitive::kPrimLong:
1671          // Processing a Dex `long-to-double' instruction.
1672          locations->SetInAt(0, Location::RequiresRegister());
1673          locations->SetOut(Location::RequiresFpuRegister());
1674          locations->AddTemp(Location::RequiresFpuRegister());
1675          locations->AddTemp(Location::RequiresFpuRegister());
1676          break;
1677
1678        case Primitive::kPrimFloat:
1679          // Processing a Dex `float-to-double' instruction.
1680          locations->SetInAt(0, Location::RequiresFpuRegister());
1681          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1682          break;
1683
1684        default:
1685          LOG(FATAL) << "Unexpected type conversion from " << input_type
1686                     << " to " << result_type;
1687      };
1688      break;
1689
1690    default:
1691      LOG(FATAL) << "Unexpected type conversion from " << input_type
1692                 << " to " << result_type;
1693  }
1694}
1695
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  // Emits code for a Dex type conversion. Each case must stay in sync with the
  // locations chosen in LocationsBuilderARM::VisitTypeConversion: conversions
  // implemented via a runtime helper (float/double-to-long, long-to-float)
  // expect their operands/results in the runtime calling convention registers.
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend the low 8 bits into the full register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend the low 16 bits into the full register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // The result is the low word of the long, wherever it lives.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            // Load only the low word of the stack slot.
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            // Truncate the constant at compile time.
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FP scratch register, then move the result to the
          // core output register.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Convert in an FP scratch D register; the 32-bit result lands in
          // its low S register and is then moved to the core output register.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // Delegated to the runtime; operand/result registers were pinned to
          // the calling convention by the locations builder.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          // Delegated to the runtime, as above.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend the low 16 bits (char is unsigned).
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          // Move the integer into the FP register, then convert in place.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          // Delegated to the runtime; registers pinned by the locations builder.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          // Move the integer into the low S register of the output D register,
          // then convert in place.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Computed as double(high) * 2^32 + unsigned-double(low), entirely
          // in FP registers, using the two FP temps from the locations builder.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
          DRegister constant_d = FromLowSToD(constant_s);

          // temp_d = int-to-double(high)
          __ vmovsr(temp_s, high);
          __ vcvtdi(temp_d, temp_s);
          // constant_d = k2Pow32EncodingForDouble
          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
          // out_d = unsigned-to-double(low)
          __ vmovsr(out_s, low);
          __ vcvtdu(out_d, out_s);
          // out_d += temp_d * constant_d
          __ vmlad(out_d, temp_d, constant_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1927
1928void LocationsBuilderARM::VisitAdd(HAdd* add) {
1929  LocationSummary* locations =
1930      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1931  switch (add->GetResultType()) {
1932    case Primitive::kPrimInt: {
1933      locations->SetInAt(0, Location::RequiresRegister());
1934      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1935      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1936      break;
1937    }
1938
1939    case Primitive::kPrimLong: {
1940      locations->SetInAt(0, Location::RequiresRegister());
1941      locations->SetInAt(1, Location::RequiresRegister());
1942      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1943      break;
1944    }
1945
1946    case Primitive::kPrimFloat:
1947    case Primitive::kPrimDouble: {
1948      locations->SetInAt(0, Location::RequiresFpuRegister());
1949      locations->SetInAt(1, Location::RequiresFpuRegister());
1950      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1951      break;
1952    }
1953
1954    default:
1955      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1956  }
1957}
1958
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
  // Emits the addition, matching the locations chosen in
  // LocationsBuilderARM::VisitAdd.
  LocationSummary* locations = add->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt:
      if (second.IsRegister()) {
        __ add(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // AddConstant picks an encodable instruction sequence for the
        // immediate.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // 64-bit addition: add the low words first, setting the carry flag,
      // then add the high words with carry. Nothing may clobber the flags
      // between the two instructions.
      __ adds(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ adc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat:
      __ vadds(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      // Double values live in S register pairs; view them as D registers.
      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}
2004
2005void LocationsBuilderARM::VisitSub(HSub* sub) {
2006  LocationSummary* locations =
2007      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2008  switch (sub->GetResultType()) {
2009    case Primitive::kPrimInt: {
2010      locations->SetInAt(0, Location::RequiresRegister());
2011      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2012      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2013      break;
2014    }
2015
2016    case Primitive::kPrimLong: {
2017      locations->SetInAt(0, Location::RequiresRegister());
2018      locations->SetInAt(1, Location::RequiresRegister());
2019      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2020      break;
2021    }
2022    case Primitive::kPrimFloat:
2023    case Primitive::kPrimDouble: {
2024      locations->SetInAt(0, Location::RequiresFpuRegister());
2025      locations->SetInAt(1, Location::RequiresFpuRegister());
2026      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2027      break;
2028    }
2029    default:
2030      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2031  }
2032}
2033
2034void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2035  LocationSummary* locations = sub->GetLocations();
2036  Location out = locations->Out();
2037  Location first = locations->InAt(0);
2038  Location second = locations->InAt(1);
2039  switch (sub->GetResultType()) {
2040    case Primitive::kPrimInt: {
2041      if (second.IsRegister()) {
2042        __ sub(out.AsRegister<Register>(),
2043               first.AsRegister<Register>(),
2044               ShifterOperand(second.AsRegister<Register>()));
2045      } else {
2046        __ AddConstant(out.AsRegister<Register>(),
2047                       first.AsRegister<Register>(),
2048                       -second.GetConstant()->AsIntConstant()->GetValue());
2049      }
2050      break;
2051    }
2052
2053    case Primitive::kPrimLong: {
2054      DCHECK(second.IsRegisterPair());
2055      __ subs(out.AsRegisterPairLow<Register>(),
2056              first.AsRegisterPairLow<Register>(),
2057              ShifterOperand(second.AsRegisterPairLow<Register>()));
2058      __ sbc(out.AsRegisterPairHigh<Register>(),
2059             first.AsRegisterPairHigh<Register>(),
2060             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2061      break;
2062    }
2063
2064    case Primitive::kPrimFloat: {
2065      __ vsubs(out.AsFpuRegister<SRegister>(),
2066               first.AsFpuRegister<SRegister>(),
2067               second.AsFpuRegister<SRegister>());
2068      break;
2069    }
2070
2071    case Primitive::kPrimDouble: {
2072      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2073               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2074               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2075      break;
2076    }
2077
2078
2079    default:
2080      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2081  }
2082}
2083
2084void LocationsBuilderARM::VisitMul(HMul* mul) {
2085  LocationSummary* locations =
2086      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2087  switch (mul->GetResultType()) {
2088    case Primitive::kPrimInt:
2089    case Primitive::kPrimLong:  {
2090      locations->SetInAt(0, Location::RequiresRegister());
2091      locations->SetInAt(1, Location::RequiresRegister());
2092      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2093      break;
2094    }
2095
2096    case Primitive::kPrimFloat:
2097    case Primitive::kPrimDouble: {
2098      locations->SetInAt(0, Location::RequiresFpuRegister());
2099      locations->SetInAt(1, Location::RequiresFpuRegister());
2100      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2101      break;
2102    }
2103
2104    default:
2105      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2106  }
2107}
2108
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  // Emits the multiplication, matching the locations chosen in
  // LocationsBuilderARM::VisitMul. The 64-bit case uses the scratch
  // register IP for the intermediate cross products.
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      // out_hi is written (by mla) before in1_lo/in2_lo are last read
      // (by umull), so it must not alias either of them.
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0]; IP <- (in1.lo * in2.lo)[63:32]
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Double values live in S register pairs; view them as D registers.
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2170
2171void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2172  DCHECK(instruction->IsDiv() || instruction->IsRem());
2173  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2174
2175  LocationSummary* locations = instruction->GetLocations();
2176  Location second = locations->InAt(1);
2177  DCHECK(second.IsConstant());
2178
2179  Register out = locations->Out().AsRegister<Register>();
2180  Register dividend = locations->InAt(0).AsRegister<Register>();
2181  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2182  DCHECK(imm == 1 || imm == -1);
2183
2184  if (instruction->IsRem()) {
2185    __ LoadImmediate(out, 0);
2186  } else {
2187    if (imm == 1) {
2188      __ Mov(out, dividend);
2189    } else {
2190      __ rsb(out, dividend, ShifterOperand(0));
2191    }
2192  }
2193}
2194
2195void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2196  DCHECK(instruction->IsDiv() || instruction->IsRem());
2197  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2198
2199  LocationSummary* locations = instruction->GetLocations();
2200  Location second = locations->InAt(1);
2201  DCHECK(second.IsConstant());
2202
2203  Register out = locations->Out().AsRegister<Register>();
2204  Register dividend = locations->InAt(0).AsRegister<Register>();
2205  Register temp = locations->GetTemp(0).AsRegister<Register>();
2206  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2207  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
2208  DCHECK(IsPowerOfTwo(abs_imm));
2209  int ctz_imm = CTZ(abs_imm);
2210
2211  if (ctz_imm == 1) {
2212    __ Lsr(temp, dividend, 32 - ctz_imm);
2213  } else {
2214    __ Asr(temp, dividend, 31);
2215    __ Lsr(temp, temp, 32 - ctz_imm);
2216  }
2217  __ add(out, temp, ShifterOperand(dividend));
2218
2219  if (instruction->IsDiv()) {
2220    __ Asr(out, out, ctz_imm);
2221    if (imm < 0) {
2222      __ rsb(out, out, ShifterOperand(0));
2223    }
2224  } else {
2225    __ ubfx(out, out, 0, ctz_imm);
2226    __ sub(out, out, ShifterOperand(temp));
2227  }
2228}
2229
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  // Emits code for int div/rem by an arbitrary constant (|imm| >= 2, not a
  // power of two) using magic-number multiplication: the quotient is derived
  // from the high 32 bits of dividend * magic, corrected and shifted.
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  // For a 32-bit divisor the computed magic fits in 32 bits, so the
  // LoadImmediate below is safe.
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  __ LoadImmediate(temp1, magic);
  // temp2:temp1 = dividend * magic; temp1 gets the high 32 bits.
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high half for the sign combination of divisor and magic,
  // per the magic-number derivation.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // out = temp1 + (temp1 < 0): add one when negative to round towards zero.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // Remainder: out = dividend - quotient * imm.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2270
2271void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2272  DCHECK(instruction->IsDiv() || instruction->IsRem());
2273  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2274
2275  LocationSummary* locations = instruction->GetLocations();
2276  Location second = locations->InAt(1);
2277  DCHECK(second.IsConstant());
2278
2279  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2280  if (imm == 0) {
2281    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2282  } else if (imm == 1 || imm == -1) {
2283    DivRemOneOrMinusOne(instruction);
2284  } else if (IsPowerOfTwo(std::abs(imm))) {
2285    DivRemByPowerOfTwo(instruction);
2286  } else {
2287    DCHECK(imm <= -2 || imm >= 2);
2288    GenerateDivRemWithAnyConstant(instruction);
2289  }
2290}
2291
2292void LocationsBuilderARM::VisitDiv(HDiv* div) {
2293  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2294  if (div->GetResultType() == Primitive::kPrimLong) {
2295    // pLdiv runtime call.
2296    call_kind = LocationSummary::kCall;
2297  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2298    // sdiv will be replaced by other instruction sequence.
2299  } else if (div->GetResultType() == Primitive::kPrimInt &&
2300             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2301    // pIdivmod runtime call.
2302    call_kind = LocationSummary::kCall;
2303  }
2304
2305  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2306
2307  switch (div->GetResultType()) {
2308    case Primitive::kPrimInt: {
2309      if (div->InputAt(1)->IsConstant()) {
2310        locations->SetInAt(0, Location::RequiresRegister());
2311        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2312        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2313        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2314        if (abs_imm <= 1) {
2315          // No temp register required.
2316        } else {
2317          locations->AddTemp(Location::RequiresRegister());
2318          if (!IsPowerOfTwo(abs_imm)) {
2319            locations->AddTemp(Location::RequiresRegister());
2320          }
2321        }
2322      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2323        locations->SetInAt(0, Location::RequiresRegister());
2324        locations->SetInAt(1, Location::RequiresRegister());
2325        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2326      } else {
2327        InvokeRuntimeCallingConvention calling_convention;
2328        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2329        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2330        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2331        //       we only need the former.
2332        locations->SetOut(Location::RegisterLocation(R0));
2333      }
2334      break;
2335    }
2336    case Primitive::kPrimLong: {
2337      InvokeRuntimeCallingConvention calling_convention;
2338      locations->SetInAt(0, Location::RegisterPairLocation(
2339          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2340      locations->SetInAt(1, Location::RegisterPairLocation(
2341          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2342      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2343      break;
2344    }
2345    case Primitive::kPrimFloat:
2346    case Primitive::kPrimDouble: {
2347      locations->SetInAt(0, Location::RequiresFpuRegister());
2348      locations->SetInAt(1, Location::RequiresFpuRegister());
2349      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2350      break;
2351    }
2352
2353    default:
2354      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2355  }
2356}
2357
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  // Emits the division, matching the locations chosen in
  // LocationsBuilderARM::VisitDiv. Constant divisors use inline sequences;
  // long division and (without sdiv) int division call the runtime, with
  // the DCHECKs below verifying the calling-convention register pinning.
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Double values live in S register pairs; view them as D registers.
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2414
2415void LocationsBuilderARM::VisitRem(HRem* rem) {
2416  Primitive::Type type = rem->GetResultType();
2417
2418  // Most remainders are implemented in the runtime.
2419  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2420  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2421    // sdiv will be replaced by other instruction sequence.
2422    call_kind = LocationSummary::kNoCall;
2423  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2424             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2425    // Have hardware divide instruction for int, do it with three instructions.
2426    call_kind = LocationSummary::kNoCall;
2427  }
2428
2429  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2430
2431  switch (type) {
2432    case Primitive::kPrimInt: {
2433      if (rem->InputAt(1)->IsConstant()) {
2434        locations->SetInAt(0, Location::RequiresRegister());
2435        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2436        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2437        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2438        if (abs_imm <= 1) {
2439          // No temp register required.
2440        } else {
2441          locations->AddTemp(Location::RequiresRegister());
2442          if (!IsPowerOfTwo(abs_imm)) {
2443            locations->AddTemp(Location::RequiresRegister());
2444          }
2445        }
2446      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2447        locations->SetInAt(0, Location::RequiresRegister());
2448        locations->SetInAt(1, Location::RequiresRegister());
2449        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2450        locations->AddTemp(Location::RequiresRegister());
2451      } else {
2452        InvokeRuntimeCallingConvention calling_convention;
2453        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2454        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2455        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2456        //       we only need the latter.
2457        locations->SetOut(Location::RegisterLocation(R1));
2458      }
2459      break;
2460    }
2461    case Primitive::kPrimLong: {
2462      InvokeRuntimeCallingConvention calling_convention;
2463      locations->SetInAt(0, Location::RegisterPairLocation(
2464          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2465      locations->SetInAt(1, Location::RegisterPairLocation(
2466          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2467      // The runtime helper puts the output in R2,R3.
2468      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2469      break;
2470    }
2471    case Primitive::kPrimFloat: {
2472      InvokeRuntimeCallingConvention calling_convention;
2473      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2474      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2475      locations->SetOut(Location::FpuRegisterLocation(S0));
2476      break;
2477    }
2478
2479    case Primitive::kPrimDouble: {
2480      InvokeRuntimeCallingConvention calling_convention;
2481      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2482          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2483      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2484          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2485      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2486      break;
2487    }
2488
2489    default:
2490      LOG(FATAL) << "Unexpected rem type " << type;
2491  }
2492}
2493
2494void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2495  LocationSummary* locations = rem->GetLocations();
2496  Location out = locations->Out();
2497  Location first = locations->InAt(0);
2498  Location second = locations->InAt(1);
2499
2500  Primitive::Type type = rem->GetResultType();
2501  switch (type) {
2502    case Primitive::kPrimInt: {
2503        if (second.IsConstant()) {
2504          GenerateDivRemConstantIntegral(rem);
2505        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2506        Register reg1 = first.AsRegister<Register>();
2507        Register reg2 = second.AsRegister<Register>();
2508        Register temp = locations->GetTemp(0).AsRegister<Register>();
2509
2510        // temp = reg1 / reg2  (integer division)
2511        // temp = temp * reg2
2512        // dest = reg1 - temp
2513        __ sdiv(temp, reg1, reg2);
2514        __ mul(temp, temp, reg2);
2515        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2516      } else {
2517        InvokeRuntimeCallingConvention calling_convention;
2518        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2519        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2520        DCHECK_EQ(R1, out.AsRegister<Register>());
2521
2522        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2523      }
2524      break;
2525    }
2526
2527    case Primitive::kPrimLong: {
2528      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2529      break;
2530    }
2531
2532    case Primitive::kPrimFloat: {
2533      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2534      break;
2535    }
2536
2537    case Primitive::kPrimDouble: {
2538      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2539      break;
2540    }
2541
2542    default:
2543      LOG(FATAL) << "Unexpected rem type " << type;
2544  }
2545}
2546
// Allocates locations for HDivZeroCheck. The divisor may be a constant, in
// which case no register is needed and the check can be resolved at codegen time.
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    // The check's value is the (non-zero) divisor itself; forward the input.
    locations->SetOut(Location::SameAsFirstInput());
  }
}
2555
2556void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2557  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2558  codegen_->AddSlowPath(slow_path);
2559
2560  LocationSummary* locations = instruction->GetLocations();
2561  Location value = locations->InAt(0);
2562
2563  switch (instruction->GetType()) {
2564    case Primitive::kPrimInt: {
2565      if (value.IsRegister()) {
2566        __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2567        __ b(slow_path->GetEntryLabel(), EQ);
2568      } else {
2569        DCHECK(value.IsConstant()) << value;
2570        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2571          __ b(slow_path->GetEntryLabel());
2572        }
2573      }
2574      break;
2575    }
2576    case Primitive::kPrimLong: {
2577      if (value.IsRegisterPair()) {
2578        __ orrs(IP,
2579                value.AsRegisterPairLow<Register>(),
2580                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2581        __ b(slow_path->GetEntryLabel(), EQ);
2582      } else {
2583        DCHECK(value.IsConstant()) << value;
2584        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2585          __ b(slow_path->GetEntryLabel());
2586        }
2587      }
2588      break;
2589    default:
2590      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2591    }
2592  }
2593}
2594
2595void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2596  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2597
2598  LocationSummary* locations =
2599      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2600
2601  switch (op->GetResultType()) {
2602    case Primitive::kPrimInt: {
2603      locations->SetInAt(0, Location::RequiresRegister());
2604      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2605      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2606      break;
2607    }
2608    case Primitive::kPrimLong: {
2609      locations->SetInAt(0, Location::RequiresRegister());
2610      locations->SetInAt(1, Location::RequiresRegister());
2611      locations->AddTemp(Location::RequiresRegister());
2612      locations->SetOut(Location::RequiresRegister());
2613      break;
2614    }
2615    default:
2616      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2617  }
2618}
2619
// Emits code for Shl/Shr/UShr. Int shifts mask the amount to the Java range
// (via kMaxIntShiftValue) since ARM register-controlled shifts do not; long
// shifts are synthesized from two 32-bit shifts plus carry-over of the bits
// crossing the word boundary, with a predicated (IT) fix-up for amounts >= 32.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // NOTE(review): the shift-amount input register is masked in place,
        // clobbering an input — presumably the register allocator permits
        // overwriting inputs here; verify against its assumptions.
        __ and_(second_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, second_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, second_reg);
        } else {
          __ Lsr(out_reg, first_reg, second_reg);
        }
      } else {
        // Constant shift amount: mask it and pick the immediate-form instruction.
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      // NOTE(review): the Java long shift-amount mask (63) is written as a
      // literal here but as kMaxIntShiftValue in the int case — consider a
      // named constant for consistency.
      if (op->IsShl()) {
        // Shift the high part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsl(o_h, high, second_reg);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, second_reg);
      } else if (op->IsShr()) {
        // Shift the low part
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, second_reg);
      } else {
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ and_(second_reg, second_reg, ShifterOperand(63));
        __ Lsr(o_l, low, second_reg);
        __ rsb(temp, second_reg, ShifterOperand(32));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, second_reg, ShifterOperand(32));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2716
// Shl shares its location logic with Shr/UShr.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2720
// Shl shares its code generation with Shr/UShr.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2724
// Shr shares its location logic with Shl/UShr.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2728
// Shr shares its code generation with Shl/UShr.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2732
// UShr shares its location logic with Shl/Shr.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2736
// UShr shares its code generation with Shl/Shr.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2740
2741void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
2742  LocationSummary* locations =
2743      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2744  InvokeRuntimeCallingConvention calling_convention;
2745  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2746  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2747  locations->SetOut(Location::RegisterLocation(R0));
2748}
2749
// Emits the allocation: loads the type index into the first runtime argument
// register, then calls the instruction's allocation entrypoint.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2758
2759void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
2760  LocationSummary* locations =
2761      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2762  InvokeRuntimeCallingConvention calling_convention;
2763  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2764  locations->SetOut(Location::RegisterLocation(R0));
2765  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2766  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2767}
2768
// Emits the array allocation: loads the type index into the first runtime
// argument register, then calls the instruction's allocation entrypoint.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2777
2778void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
2779  LocationSummary* locations =
2780      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2781  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2782  if (location.IsStackSlot()) {
2783    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2784  } else if (location.IsDoubleStackSlot()) {
2785    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2786  }
2787  locations->SetOut(location);
2788}
2789
// No code is emitted for parameters; the locations builder already placed them.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
2794
// The current ArtMethod* always lives in the method register (R0 on ARM).
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
2800
// No code is emitted; the method pointer is already in its dedicated register.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
2804
// Allocates locations for HNot (bitwise complement): register in, register out.
void LocationsBuilderARM::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
2811
// Emits bitwise complement via mvn: one instruction for int, one per word
// of the register pair for long.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      // Each half is complemented independently.
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
2832
// Allocates locations for HBooleanNot: register in, register out.
void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
2839
// Flips a boolean (0/1) value with a single eor against 1.
void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}
2846
2847void LocationsBuilderARM::VisitCompare(HCompare* compare) {
2848  LocationSummary* locations =
2849      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
2850  switch (compare->InputAt(0)->GetType()) {
2851    case Primitive::kPrimLong: {
2852      locations->SetInAt(0, Location::RequiresRegister());
2853      locations->SetInAt(1, Location::RequiresRegister());
2854      // Output overlaps because it is written before doing the low comparison.
2855      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2856      break;
2857    }
2858    case Primitive::kPrimFloat:
2859    case Primitive::kPrimDouble: {
2860      locations->SetInAt(0, Location::RequiresFpuRegister());
2861      locations->SetInAt(1, Location::RequiresFpuRegister());
2862      locations->SetOut(Location::RequiresRegister());
2863      break;
2864    }
2865    default:
2866      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
2867  }
2868}
2869
// Emits a three-way compare producing -1, 0, or 1 in `out`. The flow is
// flag-sensitive: each case leaves the condition flags set from its final
// compare, and the shared tail below the switch branches on them.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // High words decide unless equal; then the low words are compared unsigned.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      // Load 0 first (see note above), then compare and move FPSCR into APSR.
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: `out` already holds 0; overwrite with 1 or -1 if not equal.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2918
2919void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2920  LocationSummary* locations =
2921      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2922  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2923    locations->SetInAt(i, Location::Any());
2924  }
2925  locations->SetOut(Location::Any());
2926}
2927
// Phis never reach code generation; they are eliminated before this pass runs.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
2932
2933void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
2934  // TODO (ported from quick): revisit Arm barrier kinds
2935  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
2936  switch (kind) {
2937    case MemBarrierKind::kAnyStore:
2938    case MemBarrierKind::kLoadAny:
2939    case MemBarrierKind::kAnyAny: {
2940      flavor = DmbOptions::ISH;
2941      break;
2942    }
2943    case MemBarrierKind::kStoreStore: {
2944      flavor = DmbOptions::ISHST;
2945      break;
2946    }
2947    default:
2948      LOG(FATAL) << "Unexpected memory barrier " << kind;
2949  }
2950  __ dmb(flavor);
2951}
2952
// Loads a 64-bit value atomically with ldrexd into out_lo:out_hi.
// When an offset is present, out_lo doubles as a scratch register to form the
// effective address in IP before being overwritten by the load itself.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    // out_lo is safe to clobber here: ldrexd below rewrites it.
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2964
// Stores a 64-bit value atomically with an ldrexd/strexd retry loop.
// temp1 is used first as an address scratch, then as the strexd status result;
// temp2 receives the discarded ldrexd value.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  // strexd writes 0 to temp1 on success; retry until the exclusive store lands.
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
2987
// Allocates locations for instance/static field stores. Temp order matters:
// codegen reads temps 0/1 (and 2/3 for volatile doubles) by index.
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  // 64-bit volatile stores without atomic ldrd/strd need the ldrexd/strexd path.
  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3028
// Emits an instance/static field store. Volatile stores are bracketed by
// any-store / any-any barriers; wide volatile stores without atomic ldrd/strd
// go through the ldrexd/strexd loop. Reference stores mark the GC card.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    // Order all prior accesses before the volatile store.
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single-copy-atomic strd: use the exclusive store loop.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Move the double into core registers (temps 0/1) for the exclusive
        // store loop, which uses temps 2/3 (pinned to R2/R3) as scratch.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    // Order the volatile store before all subsequent accesses.
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3128
3129void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
3130  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3131  LocationSummary* locations =
3132      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3133  locations->SetInAt(0, Location::RequiresRegister());
3134
3135  bool volatile_for_double = field_info.IsVolatile()
3136      && (field_info.GetFieldType() == Primitive::kPrimDouble)
3137      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3138  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);
3139
3140  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3141    locations->SetOut(Location::RequiresFpuRegister());
3142  } else {
3143    locations->SetOut(Location::RequiresRegister(),
3144                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
3145  }
3146  if (volatile_for_double) {
3147    // Arm encoding have some additional constraints for ldrexd/strexd:
3148    // - registers need to be consecutive
3149    // - the first register should be even but not R14.
3150    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3151    // enable Arm encoding.
3152    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3153    locations->AddTemp(Location::RequiresRegister());
3154    locations->AddTemp(Location::RequiresRegister());
3155  }
3156}
3157
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  // Common code generation for instance and static field gets: the load
  // instruction is selected from the field's primitive type, volatile
  // wide (64-bit) accesses fall back to an exclusive-load sequence when the
  // CPU has no atomic ldrd, and volatile reads are followed by a barrier.
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();  // Object holding the field.
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  // True when 64-bit ldrd is atomic on this CPU; otherwise volatile wide
  // loads go through GenerateWideAtomicLoad (ldrexd-based, see the
  // ldrexd/strexd constraints noted in LocationsBuilderARM::HandleFieldGet).
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No atomic ldrd: load the pair with an exclusive-load sequence.
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Atomic path: load into two core temps, then move into the D register.
        // The implicit null check must be recorded on the first load, so it is
        // done here in the switch rather than after it.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  // Volatile loads must be followed by a load-any memory barrier.
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3242
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Delegates to the field-set handler shared with static field sets.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3246
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Delegates to the shared field-set code generator; the value-can-be-null
  // flag lets the write barrier skip marking for known non-null stores.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3250
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Delegates to the field-get handler shared with static field gets.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3254
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Delegates to the shared field-get code generator.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3258
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Delegates to the field-get handler shared with instance field gets.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3262
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Delegates to the shared field-get code generator.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3266
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Delegates to the field-set handler shared with instance field sets.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3270
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Delegates to the shared field-set code generator; the value-can-be-null
  // flag lets the write barrier skip marking for known non-null stores.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3274
3275void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
3276  LocationSummary* locations =
3277      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3278  locations->SetInAt(0, Location::RequiresRegister());
3279  if (instruction->HasUses()) {
3280    locations->SetOut(Location::SameAsFirstInput());
3281  }
3282}
3283
3284void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
3285  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3286    return;
3287  }
3288  Location obj = instruction->GetLocations()->InAt(0);
3289
3290  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
3291  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3292}
3293
3294void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3295  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3296  codegen_->AddSlowPath(slow_path);
3297
3298  LocationSummary* locations = instruction->GetLocations();
3299  Location obj = locations->InAt(0);
3300
3301  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
3302  __ b(slow_path->GetEntryLabel(), EQ);
3303}
3304
3305void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3306  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3307    GenerateImplicitNullCheck(instruction);
3308  } else {
3309    GenerateExplicitNullCheck(instruction);
3310  }
3311}
3312
3313void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3314  LocationSummary* locations =
3315      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3316  locations->SetInAt(0, Location::RequiresRegister());
3317  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3318  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3319    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3320  } else {
3321    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3322  }
3323}
3324
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  // Emits an array element load. Every case follows the same pattern:
  // - constant index: fold (index << element-size-shift) + data offset into
  //   the load's immediate offset;
  // - register index: add the scaled index to the array base into IP, then
  //   load at the data offset from IP.
  // The element load doubles as the implicit null check (recorded after the
  // switch).
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();  // Array reference.
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded like 32-bit ints (compressed-free heap refs).
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // The first load above is the faulting instruction for an implicit null check.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3451
3452void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3453  Primitive::Type value_type = instruction->GetComponentType();
3454
3455  bool needs_write_barrier =
3456      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3457  bool needs_runtime_call = instruction->NeedsTypeCheck();
3458
3459  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3460      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3461  if (needs_runtime_call) {
3462    InvokeRuntimeCallingConvention calling_convention;
3463    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3464    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3465    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3466  } else {
3467    locations->SetInAt(0, Location::RequiresRegister());
3468    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3469    if (Primitive::IsFloatingPointType(value_type)) {
3470      locations->SetInAt(2, Location::RequiresFpuRegister());
3471    } else {
3472      locations->SetInAt(2, Location::RequiresRegister());
3473    }
3474
3475    if (needs_write_barrier) {
3476      // Temporary registers for the write barrier.
3477      locations->AddTemp(Location::RequiresRegister());
3478      locations->AddTemp(Location::RequiresRegister());
3479    }
3480  }
3481}
3482
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  // Emits an array element store. Addressing mirrors VisitArrayGet: a
  // constant index is folded into the store offset, a register index is
  // scaled and added to the array base via IP. Reference stores that need a
  // type check go through the pAputObject runtime entrypoint instead; plain
  // reference stores mark the GC card afterwards.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();  // Array reference.
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // The null check must be recorded on the store itself, before any
        // write-barrier code, hence inside this case.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // Type-checked reference store: call the aput-object entrypoint.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3607
3608void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
3609  LocationSummary* locations =
3610      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3611  locations->SetInAt(0, Location::RequiresRegister());
3612  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3613}
3614
3615void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3616  LocationSummary* locations = instruction->GetLocations();
3617  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3618  Register obj = locations->InAt(0).AsRegister<Register>();
3619  Register out = locations->Out().AsRegister<Register>();
3620  __ LoadFromOffset(kLoadWord, out, obj, offset);
3621  codegen_->MaybeRecordImplicitNullCheck(instruction);
3622}
3623
3624void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3625  LocationSummary* locations =
3626      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3627  locations->SetInAt(0, Location::RequiresRegister());
3628  locations->SetInAt(1, Location::RequiresRegister());
3629  if (instruction->HasUses()) {
3630    locations->SetOut(Location::SameAsFirstInput());
3631  }
3632}
3633
3634void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3635  LocationSummary* locations = instruction->GetLocations();
3636  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3637      instruction, locations->InAt(0), locations->InAt(1));
3638  codegen_->AddSlowPath(slow_path);
3639
3640  Register index = locations->InAt(0).AsRegister<Register>();
3641  Register length = locations->InAt(1).AsRegister<Register>();
3642
3643  __ cmp(index, ShifterOperand(length));
3644  __ b(slow_path->GetEntryLabel(), CS);
3645}
3646
// Marks the card-table entry covering `object` as dirty so the GC notices
// that a reference field in it was updated. `temp` and `card` are scratch
// registers. When `can_be_null` is true, storing a null `value` skips the
// marking entirely.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  Label is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // Load the card-table base from the current thread (TR).
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  // Card index = object address >> kCardShift.
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store the low byte of the card-table base as the dirty marker at
  // card-table-base + card-index.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3663
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // Temporaries carry no location summary of their own.
  temp->SetLocations(nullptr);
}
3667
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}
3672
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  UNUSED(instruction);
  // Parallel moves must never reach the locations builder.
  LOG(FATAL) << "Unreachable";
}
3677
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Let the parallel-move resolver emit the move/swap sequence.
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3681
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // No operands; the check only needs a slow path for the runtime call.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3685
3686void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3687  HBasicBlock* block = instruction->GetBlock();
3688  if (block->GetLoopInformation() != nullptr) {
3689    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3690    // The back edge will generate the suspend check.
3691    return;
3692  }
3693  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3694    // The goto will generate the suspend check.
3695    return;
3696  }
3697  GenerateSuspendCheck(instruction, nullptr);
3698}
3699
// Emits a test of the thread's flags and a branch to a (possibly shared)
// suspend slow path. With a null `successor` the code falls through after
// the check; otherwise it branches to `successor` (a loop header) when no
// flag is set.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // Reuse the slow path already cached on the instruction, if any; several
  // calls for the same check must agree on the successor.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Load the 16-bit thread-flags word and compare against zero; any set
  // flag routes execution to the slow path.
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3728
// Accessor used by the `__` macro in the resolver's emit methods.
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  return codegen_->GetAssembler();
}
3732
// Emits one move of the parallel move at position `index`, dispatching on
// every (source kind, destination kind) pair the register allocator can
// produce. IP and DTMP are used as scratch for memory-to-memory moves.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    // Core register -> core register or 32-bit stack slot.
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    // 32-bit stack slot -> core register, S register, or stack slot (via IP).
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    // S register -> S register or 32-bit stack slot.
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    // 64-bit stack slot -> stack slot (via DTMP), core pair, or D register.
    if (destination.IsDoubleStackSlot()) {
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      // ldrd needs an even/odd consecutive pair; checked by ExpectedPairLayout.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    // Core pair -> core pair (two moves) or 64-bit stack slot (strd).
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    // D register -> D register or 64-bit stack slot.
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant source: materialize into the destination register(s), or
    // through IP into the destination stack slot(s).
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Store the double's bit pattern as two 32-bit words.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
3847
// Swaps the contents of core register `reg` and the stack slot at SP + `mem`,
// using IP as the scratch register.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
3853
3854void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
3855  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
3856  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
3857  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
3858                    SP, mem1 + stack_offset);
3859  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
3860  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
3861                   SP, mem2 + stack_offset);
3862  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
3863}
3864
3865void ParallelMoveResolverARM::EmitSwap(size_t index) {
3866  MoveOperands* move = moves_.Get(index);
3867  Location source = move->GetSource();
3868  Location destination = move->GetDestination();
3869
3870  if (source.IsRegister() && destination.IsRegister()) {
3871    DCHECK_NE(source.AsRegister<Register>(), IP);
3872    DCHECK_NE(destination.AsRegister<Register>(), IP);
3873    __ Mov(IP, source.AsRegister<Register>());
3874    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
3875    __ Mov(destination.AsRegister<Register>(), IP);
3876  } else if (source.IsRegister() && destination.IsStackSlot()) {
3877    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
3878  } else if (source.IsStackSlot() && destination.IsRegister()) {
3879    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
3880  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
3881    Exchange(source.GetStackIndex(), destination.GetStackIndex());
3882  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
3883    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
3884    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
3885    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
3886  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
3887    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
3888    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
3889    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
3890    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
3891               destination.AsRegisterPairHigh<Register>(),
3892               DTMP);
3893  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
3894    Register low_reg = source.IsRegisterPair()
3895        ? source.AsRegisterPairLow<Register>()
3896        : destination.AsRegisterPairLow<Register>();
3897    int mem = source.IsRegisterPair()
3898        ? destination.GetStackIndex()
3899        : source.GetStackIndex();
3900    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
3901    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
3902    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
3903    __ StoreDToOffset(DTMP, SP, mem);
3904  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
3905    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
3906    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
3907    __ vmovd(DTMP, first);
3908    __ vmovd(first, second);
3909    __ vmovd(second, DTMP);
3910  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
3911    DRegister reg = source.IsFpuRegisterPair()
3912        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
3913        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
3914    int mem = source.IsFpuRegisterPair()
3915        ? destination.GetStackIndex()
3916        : source.GetStackIndex();
3917    __ vmovd(DTMP, reg);
3918    __ LoadDFromOffset(reg, SP, mem);
3919    __ StoreDToOffset(DTMP, SP, mem);
3920  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
3921    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
3922                                           : destination.AsFpuRegister<SRegister>();
3923    int mem = source.IsFpuRegister()
3924        ? destination.GetStackIndex()
3925        : source.GetStackIndex();
3926
3927    __ vmovrs(IP, reg);
3928    __ LoadSFromOffset(reg, SP, mem);
3929    __ StoreToOffset(kStoreWord, IP, SP, mem);
3930  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
3931    Exchange(source.GetStackIndex(), destination.GetStackIndex());
3932    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
3933  } else {
3934    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
3935  }
3936}
3937
// Saves a scratch core register on the stack so the resolver can reuse it.
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
3941
// Restores a scratch core register previously saved by SpillScratch.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
3945
3946void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3947  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3948      ? LocationSummary::kCallOnSlowPath
3949      : LocationSummary::kNoCall;
3950  LocationSummary* locations =
3951      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3952  locations->SetInAt(0, Location::RequiresRegister());
3953  locations->SetOut(Location::RequiresRegister());
3954}
3955
// Loads a class reference, either directly from the current method (for the
// referrer's own class) or from the dex cache, resolving on the slow path if
// the cache entry is still null.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // No runtime call can be needed here (see the DCHECKs): just load the
    // declaring class from the current ArtMethod.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index]
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    // A null cache entry means the type is unresolved: take the slow path.
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      // The initialization check binds the slow path's exit label itself.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
3985
void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
  // The check may need to run the class initializer, hence the slow path.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  // When the check has uses, it produces the checked class in its input register.
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3994
3995void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
3996  // We assume the class is not null.
3997  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
3998      check->GetLoadClass(), check, check->GetDexPc(), true);
3999  codegen_->AddSlowPath(slow_path);
4000  GenerateClassInitializationCheck(slow_path,
4001                                   check->GetLocations()->InAt(0).AsRegister<Register>());
4002}
4003
// Emits code that branches to `slow_path` unless the class in `class_reg`
// is fully initialized; clobbers IP.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  // Any status below kStatusInitialized (signed compare) takes the slow path.
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4014
4015void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4016  LocationSummary* locations =
4017      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4018  locations->SetInAt(0, Location::RequiresRegister());
4019  locations->SetOut(Location::RequiresRegister());
4020}
4021
// Loads a string reference from the declaring class' dex cache, resolving it
// on the slow path if the cache entry is still null.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_->dex_cache_strings_[string_index]
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // A null cache entry means the string is unresolved: take the slow path.
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
4037
4038void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4039  LocationSummary* locations =
4040      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4041  locations->SetOut(Location::RequiresRegister());
4042}
4043
// Loads the pending exception from the current thread, then clears the
// thread-local exception slot.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  // Clear the slot so the exception is no longer considered pending.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
4051
4052void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4053  LocationSummary* locations =
4054      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4055  InvokeRuntimeCallingConvention calling_convention;
4056  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4057}
4058
// Delegates to the pDeliverException runtime entrypoint; the exception object
// is already in the runtime calling convention's first argument register.
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4063
4064void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4065  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4066      ? LocationSummary::kNoCall
4067      : LocationSummary::kCallOnSlowPath;
4068  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4069  locations->SetInAt(0, Location::RequiresRegister());
4070  locations->SetInAt(1, Location::RequiresRegister());
4071  // The out register is used as a temporary, so it overlaps with the inputs.
4072  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4073}
4074
// Materializes the boolean result of `obj instanceof cls` in `out`:
// 1 on an exact class match, 0 for a null object or a final-class mismatch,
// and the slow path decides non-final mismatches.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Label done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // `zero` is only branched to from the null check or a final-class mismatch,
  // so it must only be bound in those configurations.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4118
4119void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4120  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4121      instruction, LocationSummary::kCallOnSlowPath);
4122  locations->SetInAt(0, Location::RequiresRegister());
4123  locations->SetInAt(1, Location::RequiresRegister());
4124  locations->AddTemp(Location::RequiresRegister());
4125}
4126
// Emits a checked cast: an exact class match (or a null object) falls through;
// anything else is decided on the slow path.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // avoid null check if we know obj is not null.
  // A null reference trivially passes the cast, so jump straight to the exit.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel());
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
}
4148
4149void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4150  LocationSummary* locations =
4151      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4152  InvokeRuntimeCallingConvention calling_convention;
4153  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4154}
4155
4156void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4157  codegen_->InvokeRuntime(instruction->IsEnter()
4158        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4159      instruction,
4160      instruction->GetDexPc(),
4161      nullptr);
4162}
4163
4164void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
4165void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
4166void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4167
4168void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4169  LocationSummary* locations =
4170      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4171  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4172         || instruction->GetResultType() == Primitive::kPrimLong);
4173  locations->SetInAt(0, Location::RequiresRegister());
4174  locations->SetInAt(1, Location::RequiresRegister());
4175  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4176}
4177
4178void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
4179  HandleBitwiseOperation(instruction);
4180}
4181
4182void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
4183  HandleBitwiseOperation(instruction);
4184}
4185
4186void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
4187  HandleBitwiseOperation(instruction);
4188}
4189
4190void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4191  LocationSummary* locations = instruction->GetLocations();
4192
4193  if (instruction->GetResultType() == Primitive::kPrimInt) {
4194    Register first = locations->InAt(0).AsRegister<Register>();
4195    Register second = locations->InAt(1).AsRegister<Register>();
4196    Register out = locations->Out().AsRegister<Register>();
4197    if (instruction->IsAnd()) {
4198      __ and_(out, first, ShifterOperand(second));
4199    } else if (instruction->IsOr()) {
4200      __ orr(out, first, ShifterOperand(second));
4201    } else {
4202      DCHECK(instruction->IsXor());
4203      __ eor(out, first, ShifterOperand(second));
4204    }
4205  } else {
4206    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
4207    Location first = locations->InAt(0);
4208    Location second = locations->InAt(1);
4209    Location out = locations->Out();
4210    if (instruction->IsAnd()) {
4211      __ and_(out.AsRegisterPairLow<Register>(),
4212              first.AsRegisterPairLow<Register>(),
4213              ShifterOperand(second.AsRegisterPairLow<Register>()));
4214      __ and_(out.AsRegisterPairHigh<Register>(),
4215              first.AsRegisterPairHigh<Register>(),
4216              ShifterOperand(second.AsRegisterPairHigh<Register>()));
4217    } else if (instruction->IsOr()) {
4218      __ orr(out.AsRegisterPairLow<Register>(),
4219             first.AsRegisterPairLow<Register>(),
4220             ShifterOperand(second.AsRegisterPairLow<Register>()));
4221      __ orr(out.AsRegisterPairHigh<Register>(),
4222             first.AsRegisterPairHigh<Register>(),
4223             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4224    } else {
4225      DCHECK(instruction->IsXor());
4226      __ eor(out.AsRegisterPairLow<Register>(),
4227             first.AsRegisterPairLow<Register>(),
4228             ShifterOperand(second.AsRegisterPairLow<Register>()));
4229      __ eor(out.AsRegisterPairHigh<Register>(),
4230             first.AsRegisterPairHigh<Register>(),
4231             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4232    }
4233  }
4234}
4235
// Emits the call sequence for a static or direct invoke, dispatching on the
// invoke kind: string-init entrypoint, recursive self-call, or a lookup
// through the current method's dex cache.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    Register reg = temp.AsRegister<Register>();
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, reg, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else if (invoke->IsRecursive()) {
    // Self-call: branch directly to this method's frame entry.
    __ bl(GetFrameEntryLabel());
  } else {
    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
    Register method_reg;
    Register reg = temp.AsRegister<Register>();
    if (current_method.IsRegister()) {
      method_reg = current_method.AsRegister<Register>();
    } else {
      // Intrinsified invokes do not carry the current method as an input;
      // reload it from its stack home instead.
      DCHECK(invoke->GetLocations()->Intrinsified());
      DCHECK(!current_method.IsValid());
      method_reg = reg;
      __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
    }
    // reg = current_method->dex_cache_resolved_methods_;
    __ LoadFromOffset(
        kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
    // reg = reg[index_in_cache]
    __ LoadFromOffset(
        kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
    // LR = reg[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  }

  DCHECK(!IsLeafMethod());
}
4283
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  // Reaching this visitor at code-generation time indicates a pass ordering bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4289
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  // Reaching this visitor at code-generation time indicates a pass ordering bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4295
4296}  // namespace arm
4297}  // namespace art
4298