code_generator_arm.cc revision 2bcb43111edf7bf99fe409ff3e9c76a285e54c25
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "gc/accounting/card_table.h"
24#include "intrinsics.h"
25#include "intrinsics_arm.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// The current ArtMethod* is spilled at the bottom of the frame (SP + 0),
// see GenerateFrameEntry.
static constexpr int kCurrentMethodStackOffset = 0;
// Register holding the ArtMethod* on entry (also the first argument register).
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Core callee-saves. PC is listed to mimic Quick's frame layout; the prologue
// actually pushes LR in its place (see GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
// Callee-saved VFP registers (S16-S31 per the ARM AAPCS).
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;
57
// Shorthands for the slow paths below: `__` emits through the assembler of the
// `codegen` parameter in scope, and QUICK_ENTRY_POINT(x) yields the int32
// offset of quick entrypoint `x` for the 32-bit ARM word size.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
60
// Slow path for HNullCheck: calls the pThrowNullPointer runtime entrypoint.
// No registers are saved and no exit branch is emitted, so the call is
// expected not to return here (the entrypoint throws).
class NullCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
78
// Slow path for HDivZeroCheck: calls the pThrowDivZero runtime entrypoint.
// As with the null check, the entrypoint throws, so no exit branch is emitted.
class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
96
97class SuspendCheckSlowPathARM : public SlowPathCodeARM {
98 public:
99  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
100      : instruction_(instruction), successor_(successor) {}
101
102  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
103    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
104    __ Bind(GetEntryLabel());
105    SaveLiveRegisters(codegen, instruction_->GetLocations());
106    arm_codegen->InvokeRuntime(
107        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
108    RestoreLiveRegisters(codegen, instruction_->GetLocations());
109    if (successor_ == nullptr) {
110      __ b(GetReturnLabel());
111    } else {
112      __ b(arm_codegen->GetLabelOf(successor_));
113    }
114  }
115
116  Label* GetReturnLabel() {
117    DCHECK(successor_ == nullptr);
118    return &return_label_;
119  }
120
121  HBasicBlock* GetSuccessor() const {
122    return successor_;
123  }
124
125  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }
126
127 private:
128  HSuspendCheck* const instruction_;
129  // If not null, the block to branch to after the suspend check.
130  HBasicBlock* const successor_;
131
132  // If `successor_` is null, the label to branch to after the suspend check.
133  Label return_label_;
134
135  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
136};
137
// Slow path for HBoundsCheck: moves the offending index and the array length
// into the first two runtime argument registers and calls pThrowArrayBounds.
// The entrypoint throws, so no exit branch is emitted.
class BoundsCheckSlowPathARM : public SlowPathCodeARM {
 public:
  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
                         Location index_location,
                         Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        index_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        length_location_,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;   // Location of the out-of-range index.
  const Location length_location_;  // Location of the array length.

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
173
174class LoadClassSlowPathARM : public SlowPathCodeARM {
175 public:
176  LoadClassSlowPathARM(HLoadClass* cls,
177                       HInstruction* at,
178                       uint32_t dex_pc,
179                       bool do_clinit)
180      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
181    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
182  }
183
184  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
185    LocationSummary* locations = at_->GetLocations();
186
187    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
188    __ Bind(GetEntryLabel());
189    SaveLiveRegisters(codegen, locations);
190
191    InvokeRuntimeCallingConvention calling_convention;
192    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
193    int32_t entry_point_offset = do_clinit_
194        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
195        : QUICK_ENTRY_POINT(pInitializeType);
196    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
197
198    // Move the class to the desired location.
199    Location out = locations->Out();
200    if (out.IsValid()) {
201      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
202      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
203    }
204    RestoreLiveRegisters(codegen, locations);
205    __ b(GetExitLabel());
206  }
207
208  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }
209
210 private:
211  // The class this slow path will load.
212  HLoadClass* const cls_;
213
214  // The instruction where this slow path is happening.
215  // (Might be the load class or an initialization check).
216  HInstruction* const at_;
217
218  // The dex PC of `at_`.
219  const uint32_t dex_pc_;
220
221  // Whether to initialize the class.
222  const bool do_clinit_;
223
224  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
225};
226
// Slow path resolving a string through the pResolveString runtime entrypoint,
// then moving the result from R0 into the instruction's output location.
class LoadStringSlowPathARM : public SlowPathCodeARM {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // The output register must not be live across the call, or the restore
    // below would clobber the result.
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // The entrypoint takes the string index as first argument.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
256
257class TypeCheckSlowPathARM : public SlowPathCodeARM {
258 public:
259  TypeCheckSlowPathARM(HInstruction* instruction,
260                       Location class_to_check,
261                       Location object_class,
262                       uint32_t dex_pc)
263      : instruction_(instruction),
264        class_to_check_(class_to_check),
265        object_class_(object_class),
266        dex_pc_(dex_pc) {}
267
268  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
269    LocationSummary* locations = instruction_->GetLocations();
270    DCHECK(instruction_->IsCheckCast()
271           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
272
273    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
274    __ Bind(GetEntryLabel());
275    SaveLiveRegisters(codegen, locations);
276
277    // We're moving two locations to locations that could overlap, so we need a parallel
278    // move resolver.
279    InvokeRuntimeCallingConvention calling_convention;
280    codegen->EmitParallelMoves(
281        class_to_check_,
282        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
283        Primitive::kPrimNot,
284        object_class_,
285        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
286        Primitive::kPrimNot);
287
288    if (instruction_->IsInstanceOf()) {
289      arm_codegen->InvokeRuntime(
290          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
291      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
292    } else {
293      DCHECK(instruction_->IsCheckCast());
294      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
295    }
296
297    RestoreLiveRegisters(codegen, locations);
298    __ b(GetExitLabel());
299  }
300
301  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }
302
303 private:
304  HInstruction* const instruction_;
305  const Location class_to_check_;
306  const Location object_class_;
307  uint32_t dex_pc_;
308
309  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
310};
311
// Slow path for HDeoptimize: saves the live registers and calls the
// pDeoptimize runtime entrypoint.
// NOTE(review): unlike the other resuming slow paths there is no
// RestoreLiveRegisters / exit branch here — pDeoptimize is expected not to
// return to this code (execution continues in the interpreter); confirm
// against the entrypoint's contract.
class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
333
// Slow-path emission is done; re-target the `__` shorthand at this code
// generator's own assembler for the remainder of the file. (The previously
// duplicated, redundant `#undef __` has been removed.)
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
338
339inline Condition ARMCondition(IfCondition cond) {
340  switch (cond) {
341    case kCondEQ: return EQ;
342    case kCondNE: return NE;
343    case kCondLT: return LT;
344    case kCondLE: return LE;
345    case kCondGT: return GT;
346    case kCondGE: return GE;
347    default:
348      LOG(FATAL) << "Unknown if condition";
349  }
350  return EQ;        // Unreachable.
351}
352
353inline Condition ARMOppositeCondition(IfCondition cond) {
354  switch (cond) {
355    case kCondEQ: return NE;
356    case kCondNE: return EQ;
357    case kCondLT: return GE;
358    case kCondLE: return GT;
359    case kCondGT: return LE;
360    case kCondGE: return LT;
361    default:
362      LOG(FATAL) << "Unknown if condition";
363  }
364  return EQ;        // Unreachable.
365}
366
// Writes the printable name of core register `reg` to `stream`.
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Register(reg);
}
370
// Writes the printable name of single-precision FP register `reg` to `stream`.
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << SRegister(reg);
}
374
// Spills core register `reg_id` to the frame at SP + `stack_index`.
// Returns the number of bytes used (one ARM word).
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
379
// Reloads core register `reg_id` from the frame at SP + `stack_index`.
// Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
384
// Spills single-precision FP register `reg_id` to the frame at
// SP + `stack_index`. Returns the number of bytes used (one ARM word).
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
389
// Reloads single-precision FP register `reg_id` from the frame at
// SP + `stack_index`. Returns the number of bytes consumed (one ARM word).
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
394
// Constructs the ARM code generator: passes register counts and the
// callee-save masks to the base class and sets up the per-graph helpers
// (location builder, instruction visitor, move resolver).
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
416
// Finalizes code generation: lets the assembler fix up branches and emit the
// literal pool, then rewrites every recorded native PC (stack maps, block
// labels, disassembly intervals) to its adjusted post-fixup position before
// delegating to the base class.
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust native pc offsets of block labels.
  for (size_t block_idx = 0u, end = block_order_->Size(); block_idx != end; ++block_idx) {
    HBasicBlock* block = block_order_->Get(block_idx);
    // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
    // FirstNonEmptyBlock() which could lead to adjusting a label more than once.
    DCHECK_LT(static_cast<size_t>(block->GetBlockId()), block_labels_.Size());
    Label* block_label = &block_labels_.GetRawStorage()[block->GetBlockId()];
    // Unbound labels belong to single-jump blocks that emitted no code.
    DCHECK_EQ(block_label->IsBound(), !block->IsSingleJump());
    if (block_label->IsBound()) {
      __ AdjustLabelPosition(block_label);
    }
  }
  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}
456
// Allocates a free register (or register pair) suitable for `type` and marks
// it as blocked, keeping blocked_register_pairs_ in sync with the individual
// core registers so pairs and singles never overlap.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      // Neither half of a free pair may already be blocked individually.
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index.
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
507
508void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
509  // Don't allocate the dalvik style register pair passing.
510  blocked_register_pairs_[R1_R2] = true;
511
512  // Stack register, LR and PC are always reserved.
513  blocked_core_registers_[SP] = true;
514  blocked_core_registers_[LR] = true;
515  blocked_core_registers_[PC] = true;
516
517  // Reserve thread register.
518  blocked_core_registers_[TR] = true;
519
520  // Reserve temp register.
521  blocked_core_registers_[IP] = true;
522
523  if (is_baseline) {
524    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
525      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
526    }
527
528    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
529
530    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
531      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
532    }
533  }
534
535  UpdateBlockedPairRegisters();
536}
537
538void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
539  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
540    ArmManagedRegister current =
541        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
542    if (blocked_core_registers_[current.AsRegisterPairLow()]
543        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
544      blocked_register_pairs_[i] = true;
545    }
546  }
547}
548
// Visitor emitting native code for each HInstruction through `codegen`'s
// assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
553
// Computes core_spill_mask_ and fpu_spill_mask_ from the registers the
// allocator actually used, intersected with the callee-save sets.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest set bits so the mask
    // describes one contiguous run of S registers.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
574
// Maps an ARM core register to its DWARF register number for CFI.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

// Maps an ARM single-precision FP register to its DWARF register number.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
582
// Emits the method prologue: optional implicit stack overflow probe, push of
// the core callee-saves (with LR substituted for PC), vpush of the FPU
// callee-saves, allocation of the rest of the frame, and the spill of the
// current ArtMethod* at SP + 0. CFI directives track every adjustment.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  // Only implicit stack overflow checks are supported here.
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit check: load from SP minus the reserved overflow gap. On stack
    // overflow this access faults; RecordPcInfo lets the runtime map the
    // faulting PC back to this method.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is one contiguous run (see ComputeSpillMask), so a
    // single vpush from its lowest register covers it.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the remainder of the frame beyond the register spill area.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  // Spill the current method at the bottom of the frame (kCurrentMethodStackOffset).
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
616
617void CodeGeneratorARM::GenerateFrameExit() {
618  if (HasEmptyFrame()) {
619    __ bx(LR);
620    return;
621  }
622  __ cfi().RememberState();
623  int adjust = GetFrameSize() - FrameEntrySpillSize();
624  __ AddConstant(SP, adjust);
625  __ cfi().AdjustCFAOffset(-adjust);
626  if (fpu_spill_mask_ != 0) {
627    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
628    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
629    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
630    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
631  }
632  __ PopList(core_spill_mask_);
633  __ cfi().RestoreState();
634  __ cfi().DefCFAOffset(GetFrameSize());
635}
636
// Binds the label of `block` to the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
640
// Returns the stack slot of a local: 64-bit values (long/double) get a double
// stack slot, 32-bit values a single one. Sub-word and void types LOG(FATAL):
// they are not expected as local load types here.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}
664
// Computes the location of the next method argument of `type`, advancing the
// GP register, FP register and stack cursors. The stack cursor always
// advances — even when the argument lands in a register — so register and
// stack arguments never overlap.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // 32-bit integral values take the next GP register, else a stack slot.
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs take a consecutive register pair, or a double stack slot.
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The pair must be two consecutive registers (see ExpectedPairLayout).
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // When the float cursor is even (aligned), jump it past any registers
      // already consumed by doubles; odd cursors back-fill alignment gaps.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at an even S register (aligned pair).
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
739
740Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
741  switch (type) {
742    case Primitive::kPrimBoolean:
743    case Primitive::kPrimByte:
744    case Primitive::kPrimChar:
745    case Primitive::kPrimShort:
746    case Primitive::kPrimInt:
747    case Primitive::kPrimNot: {
748      return Location::RegisterLocation(R0);
749    }
750
751    case Primitive::kPrimFloat: {
752      return Location::FpuRegisterLocation(S0);
753    }
754
755    case Primitive::kPrimLong: {
756      return Location::RegisterPairLocation(R0, R1);
757    }
758
759    case Primitive::kPrimDouble: {
760      return Location::FpuRegisterPairLocation(S0, S1);
761    }
762
763    case Primitive::kPrimVoid:
764      return Location();
765  }
766
767  UNREACHABLE();
768}
769
// The ArtMethod* is always passed in R0 (kMethodRegisterArgument).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
773
// Emits a 32-bit move between any two locations (core register, S register,
// or stack slot). Stack-to-stack moves go through the scratch register IP.
// No-op when source and destination are the same location.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      // Stack-to-stack: bounce the word through IP.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
807
// Emits a 64-bit move between core register pairs, FPU double registers
// (addressed via their low S register) and double stack slots. Combinations
// not produced by the register allocator are left UNIMPLEMENTED.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Pair-to-pair: the halves may overlap, so let the parallel move
      // resolver sequence (or swap) the two word moves safely.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // LDRD needs an even-numbered, consecutive pair (see ExpectedPairLayout).
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1/R2 does not satisfy the even-register requirement of STRD
        // (kStoreWordPair), so store the two words individually.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      // Stack-to-stack: the resolver provides the scratch sequencing.
      DCHECK(source.IsDoubleStackSlot());
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
865
// Baseline move of `instruction`'s value into `location`, on behalf of its
// user `move_for`. Handles, in order: the current-method pointer, values
// already in place, constant outputs (materialized as immediates), local
// loads (from their stack slot), temporaries, and finally the output of the
// just-generated instruction.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The method pointer lives in its reserved slot at the bottom of the frame.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // Value is already where the user wants it.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Materialize via IP, then spill.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Spill the two halves word by word through IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // In baseline, a non-constant value is consumed right after it is
    // produced (possibly through a temporary).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
949
// Calls a quick runtime entrypoint: loads the entrypoint address from the
// thread register (TR) at `entry_point_offset` into LR, calls through LR,
// and records the safepoint PC for `instruction` at `dex_pc`.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only instructions whose slow paths are expected in a leaf method, or
  // instructions already marked as calling, may reach here in a leaf method.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
964
// Shared lowering for HGoto and HTryBoundary: emits the (possibly elided)
// unconditional branch to `successor`, emitting a suspend check first when
// this edge is a loop back edge or follows an entry-block suspend check.
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // The suspend check at the back edge also performs the branch.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // Elide the branch when the successor is the next block in emission order.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
985
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  // HGoto uses no registers and produces no value.
  got->SetLocations(nullptr);
}
989
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  // Delegate to the shared branch emission.
  HandleGoto(got, got->GetSuccessor());
}
993
void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  // HTryBoundary uses no registers and produces no value.
  try_boundary->SetLocations(nullptr);
}
997
void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  // No branch is emitted towards the exit block (HandleGoto forbids it).
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}
1004
void LocationsBuilderARM::VisitExit(HExit* exit) {
  // HExit uses no registers and produces no value.
  exit->SetLocations(nullptr);
}
1008
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  // The exit block generates no code.
  UNUSED(exit);
}
1012
// Emits the conditional control flow for `instruction` (HIf or HDeoptimize).
// Three cases:
//  - constant condition: fold statically, branching only when needed;
//  - materialized condition: compare the boolean register against zero;
//  - non-materialized condition: emit the compare here and branch on flags.
// `false_target` is null when the false successor falls through;
// `always_true_target` is null when the true successor falls through.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
      // Fall through to the (optional) branch to the false target.
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ CompareAndBranchIfNonZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
                                   true_target);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          // Immediate not encodable as a shifter operand: load it into IP.
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
1063
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
  HInstruction* cond = if_instr->InputAt(0);
  // A register is only needed when the condition was materialized; otherwise
  // the compare is emitted inline by GenerateTestAndBranch.
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}
1072
1073void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1074  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1075  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1076  Label* always_true_target = true_target;
1077  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1078                                if_instr->IfTrueSuccessor())) {
1079    always_true_target = nullptr;
1080  }
1081  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1082                                if_instr->IfFalseSuccessor())) {
1083    false_target = nullptr;
1084  }
1085  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1086}
1087
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  // A register is only needed when the condition was materialized.
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}
1097
1098void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1099  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1100      DeoptimizationSlowPathARM(deoptimize);
1101  codegen_->AddSlowPath(slow_path);
1102  Label* slow_path_entry = slow_path->GetEntryLabel();
1103  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1104}
1105
void LocationsBuilderARM::VisitCondition(HCondition* cond) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
  // An output register is only needed when the boolean must be materialized;
  // otherwise the user (e.g. HIf) consumes the flags directly.
  if (cond->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}
1115
// Materializes a boolean HCondition into a core register: compares the two
// inputs, then uses an IT (if-then-else) block to move 1 into the output on
// the condition and 0 on its opposite.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  if (!cond->NeedsMaterialization()) return;  // Flags consumed by the user.
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      // Immediate not encodable as a shifter operand: load it into IP.
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // Predicate the next two moves: first on the condition, second on its opposite.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}
1141
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  // Locations are set up by the generic HCondition handler.
  VisitCondition(comp);
}
1145
void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  // Code generation is shared with the generic HCondition handler.
  VisitCondition(comp);
}
1149
void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  // Locations are set up by the generic HCondition handler.
  VisitCondition(comp);
}
1153
void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  // Code generation is shared with the generic HCondition handler.
  VisitCondition(comp);
}
1157
void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  // Locations are set up by the generic HCondition handler.
  VisitCondition(comp);
}
1161
void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  // Code generation is shared with the generic HCondition handler.
  VisitCondition(comp);
}
1165
void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  // Locations are set up by the generic HCondition handler.
  VisitCondition(comp);
}
1169
void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  // Code generation is shared with the generic HCondition handler.
  VisitCondition(comp);
}
1173
void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  // Locations are set up by the generic HCondition handler.
  VisitCondition(comp);
}
1177
void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  // Code generation is shared with the generic HCondition handler.
  VisitCondition(comp);
}
1181
void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  // Locations are set up by the generic HCondition handler.
  VisitCondition(comp);
}
1185
void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  // Code generation is shared with the generic HCondition handler.
  VisitCondition(comp);
}
1189
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  // HLocal uses no registers and produces no value.
  local->SetLocations(nullptr);
}
1193
void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // Locals only appear in the entry block; no code is generated for them.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
1197
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  // HLoadLocal uses no registers; the user's Move() reads the stack slot.
  load->SetLocations(nullptr);
}
1201
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1206
// Pins the stored value (input 1) directly to the local's stack slot so the
// preceding Move() writes it in place; no code is emitted for the store itself.
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}
1230
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  // The store already happened via the input's stack-slot location.
  UNUSED(store);
}
1234
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants are materialized at their use sites, not here.
  locations->SetOut(Location::ConstantLocation(constant));
}
1240
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1245
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants are materialized at their use sites, not here.
  locations->SetOut(Location::ConstantLocation(constant));
}
1251
void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1256
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants are materialized at their use sites, not here.
  locations->SetOut(Location::ConstantLocation(constant));
}
1262
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1267
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants are materialized at their use sites, not here.
  locations->SetOut(Location::ConstantLocation(constant));
}
1273
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1278
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  // Constants are materialized at their use sites, not here.
  locations->SetOut(Location::ConstantLocation(constant));
}
1284
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1289
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // A barrier uses no registers and produces no value.
  memory_barrier->SetLocations(nullptr);
}
1293
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Emit the fence matching the requested barrier kind.
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1297
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  // HReturnVoid uses no registers and produces no value.
  ret->SetLocations(nullptr);
}
1301
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  // Tear down the frame and return to the caller.
  codegen_->GenerateFrameExit();
}
1306
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  // Pin the returned value to the ABI return location for its type.
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
1312
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  // The value is already in the return register(s); just exit the frame.
  codegen_->GenerateFrameExit();
}
1317
// Sets up call locations for a static/direct invoke, first giving the
// intrinsics recognizer a chance to claim the call.
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    // Intrinsic locations were set by TryDispatch.
    return;
  }

  HandleInvoke(invoke);
}
1331
1332static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1333  if (invoke->GetLocations()->Intrinsified()) {
1334    IntrinsicCodeGeneratorARM intrinsic(codegen);
1335    intrinsic.Dispatch(invoke);
1336    return true;
1337  }
1338  return false;
1339}
1340
// Generates code for a static/direct invoke: intrinsic expansion when
// available, otherwise the generic static-or-direct call sequence.
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(
      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1355
// Applies the ARM dex calling convention to an invoke's inputs and output.
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
1360
1361void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1362  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1363                                         codegen_->GetInstructionSetFeatures());
1364  if (intrinsic.TryDispatch(invoke)) {
1365    return;
1366  }
1367
1368  HandleInvoke(invoke);
1369}
1370
// Generates a virtual dispatch: load the receiver's class, fetch the target
// ArtMethod* from the embedded vtable, load its quick entrypoint into LR and
// call it. Falls back to intrinsic code when available.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  // This class load doubles as the implicit null check on the receiver.
  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1397
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  // R12 carries the dex method index, loaded by the code generator below.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1403
// Generates an interface dispatch: set the hidden dex-method-index argument,
// load the receiver's class, fetch the target from the embedded IMT, load
// its quick entrypoint into LR and call it.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  // IMT slots wrap modulo the fixed table size, so collisions are possible.
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the implicit null check on the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1436
// Sets up locations for arithmetic negation of int, long, float or double.
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // Output must not alias the input: the generated sequence writes
      // out.lo/out.hi before it is done reading in.hi (see codegen below).
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1462
// Generates arithmetic negation. Int uses a single RSB; long computes
// 0 - in with borrow propagation (RSBS/SBC/SUB, since Thumb-2 has no RSC);
// float/double use the VFP negate instructions.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in.
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1509
1510void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1511  Primitive::Type result_type = conversion->GetResultType();
1512  Primitive::Type input_type = conversion->GetInputType();
1513  DCHECK_NE(result_type, input_type);
1514
1515  // The float-to-long, double-to-long and long-to-float type conversions
1516  // rely on a call to the runtime.
1517  LocationSummary::CallKind call_kind =
1518      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1519        && result_type == Primitive::kPrimLong)
1520       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
1521      ? LocationSummary::kCall
1522      : LocationSummary::kNoCall;
1523  LocationSummary* locations =
1524      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1525
1526  // The Java language does not allow treating boolean as an integral type but
1527  // our bit representation makes it safe.
1528
1529  switch (result_type) {
1530    case Primitive::kPrimByte:
1531      switch (input_type) {
1532        case Primitive::kPrimBoolean:
1533          // Boolean input is a result of code transformations.
1534        case Primitive::kPrimShort:
1535        case Primitive::kPrimInt:
1536        case Primitive::kPrimChar:
1537          // Processing a Dex `int-to-byte' instruction.
1538          locations->SetInAt(0, Location::RequiresRegister());
1539          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1540          break;
1541
1542        default:
1543          LOG(FATAL) << "Unexpected type conversion from " << input_type
1544                     << " to " << result_type;
1545      }
1546      break;
1547
1548    case Primitive::kPrimShort:
1549      switch (input_type) {
1550        case Primitive::kPrimBoolean:
1551          // Boolean input is a result of code transformations.
1552        case Primitive::kPrimByte:
1553        case Primitive::kPrimInt:
1554        case Primitive::kPrimChar:
1555          // Processing a Dex `int-to-short' instruction.
1556          locations->SetInAt(0, Location::RequiresRegister());
1557          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1558          break;
1559
1560        default:
1561          LOG(FATAL) << "Unexpected type conversion from " << input_type
1562                     << " to " << result_type;
1563      }
1564      break;
1565
1566    case Primitive::kPrimInt:
1567      switch (input_type) {
1568        case Primitive::kPrimLong:
1569          // Processing a Dex `long-to-int' instruction.
1570          locations->SetInAt(0, Location::Any());
1571          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1572          break;
1573
1574        case Primitive::kPrimFloat:
1575          // Processing a Dex `float-to-int' instruction.
1576          locations->SetInAt(0, Location::RequiresFpuRegister());
1577          locations->SetOut(Location::RequiresRegister());
1578          locations->AddTemp(Location::RequiresFpuRegister());
1579          break;
1580
1581        case Primitive::kPrimDouble:
1582          // Processing a Dex `double-to-int' instruction.
1583          locations->SetInAt(0, Location::RequiresFpuRegister());
1584          locations->SetOut(Location::RequiresRegister());
1585          locations->AddTemp(Location::RequiresFpuRegister());
1586          break;
1587
1588        default:
1589          LOG(FATAL) << "Unexpected type conversion from " << input_type
1590                     << " to " << result_type;
1591      }
1592      break;
1593
1594    case Primitive::kPrimLong:
1595      switch (input_type) {
1596        case Primitive::kPrimBoolean:
1597          // Boolean input is a result of code transformations.
1598        case Primitive::kPrimByte:
1599        case Primitive::kPrimShort:
1600        case Primitive::kPrimInt:
1601        case Primitive::kPrimChar:
1602          // Processing a Dex `int-to-long' instruction.
1603          locations->SetInAt(0, Location::RequiresRegister());
1604          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1605          break;
1606
1607        case Primitive::kPrimFloat: {
1608          // Processing a Dex `float-to-long' instruction.
1609          InvokeRuntimeCallingConvention calling_convention;
1610          locations->SetInAt(0, Location::FpuRegisterLocation(
1611              calling_convention.GetFpuRegisterAt(0)));
1612          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1613          break;
1614        }
1615
1616        case Primitive::kPrimDouble: {
1617          // Processing a Dex `double-to-long' instruction.
1618          InvokeRuntimeCallingConvention calling_convention;
1619          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1620              calling_convention.GetFpuRegisterAt(0),
1621              calling_convention.GetFpuRegisterAt(1)));
1622          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1623          break;
1624        }
1625
1626        default:
1627          LOG(FATAL) << "Unexpected type conversion from " << input_type
1628                     << " to " << result_type;
1629      }
1630      break;
1631
1632    case Primitive::kPrimChar:
1633      switch (input_type) {
1634        case Primitive::kPrimBoolean:
1635          // Boolean input is a result of code transformations.
1636        case Primitive::kPrimByte:
1637        case Primitive::kPrimShort:
1638        case Primitive::kPrimInt:
1639          // Processing a Dex `int-to-char' instruction.
1640          locations->SetInAt(0, Location::RequiresRegister());
1641          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1642          break;
1643
1644        default:
1645          LOG(FATAL) << "Unexpected type conversion from " << input_type
1646                     << " to " << result_type;
1647      }
1648      break;
1649
1650    case Primitive::kPrimFloat:
1651      switch (input_type) {
1652        case Primitive::kPrimBoolean:
1653          // Boolean input is a result of code transformations.
1654        case Primitive::kPrimByte:
1655        case Primitive::kPrimShort:
1656        case Primitive::kPrimInt:
1657        case Primitive::kPrimChar:
1658          // Processing a Dex `int-to-float' instruction.
1659          locations->SetInAt(0, Location::RequiresRegister());
1660          locations->SetOut(Location::RequiresFpuRegister());
1661          break;
1662
1663        case Primitive::kPrimLong: {
1664          // Processing a Dex `long-to-float' instruction.
1665          InvokeRuntimeCallingConvention calling_convention;
1666          locations->SetInAt(0, Location::RegisterPairLocation(
1667              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
1668          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1669          break;
1670        }
1671
1672        case Primitive::kPrimDouble:
1673          // Processing a Dex `double-to-float' instruction.
1674          locations->SetInAt(0, Location::RequiresFpuRegister());
1675          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1676          break;
1677
1678        default:
1679          LOG(FATAL) << "Unexpected type conversion from " << input_type
1680                     << " to " << result_type;
1681      };
1682      break;
1683
1684    case Primitive::kPrimDouble:
1685      switch (input_type) {
1686        case Primitive::kPrimBoolean:
1687          // Boolean input is a result of code transformations.
1688        case Primitive::kPrimByte:
1689        case Primitive::kPrimShort:
1690        case Primitive::kPrimInt:
1691        case Primitive::kPrimChar:
1692          // Processing a Dex `int-to-double' instruction.
1693          locations->SetInAt(0, Location::RequiresRegister());
1694          locations->SetOut(Location::RequiresFpuRegister());
1695          break;
1696
1697        case Primitive::kPrimLong:
1698          // Processing a Dex `long-to-double' instruction.
1699          locations->SetInAt(0, Location::RequiresRegister());
1700          locations->SetOut(Location::RequiresFpuRegister());
1701          locations->AddTemp(Location::RequiresFpuRegister());
1702          locations->AddTemp(Location::RequiresFpuRegister());
1703          break;
1704
1705        case Primitive::kPrimFloat:
1706          // Processing a Dex `float-to-double' instruction.
1707          locations->SetInAt(0, Location::RequiresFpuRegister());
1708          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1709          break;
1710
1711        default:
1712          LOG(FATAL) << "Unexpected type conversion from " << input_type
1713                     << " to " << result_type;
1714      };
1715      break;
1716
1717    default:
1718      LOG(FATAL) << "Unexpected type conversion from " << input_type
1719                 << " to " << result_type;
1720  }
1721}
1722
// Emits ARM code for an HTypeConversion.  Integral narrowing/widening is done
// with bitfield-extract and shift instructions; int<->FP conversions use VFP
// instructions (with FP temps reserved by the locations builder); conversions
// with no direct hardware support (float/double -> long, long -> float) were
// set up as runtime calls by the locations builder, so only the invoke is
// emitted here.
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend the low 8 bits into the full register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend the low 16 bits into the full register.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // The result is just the low 32 bits of the long; the input may be
          // in a register pair, spilled, or a constant.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FP temp, then move the integer bits to a core register.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Convert in an FP temp, then move the integer bits to a core register.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // No direct instruction; operands were placed in the runtime calling
          // convention by the locations builder.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend the low 16 bits (char is unsigned).
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          // Move the integer bits into the output S register, then convert in place.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          // No direct instruction; handled via a runtime call.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          // Move the integer bits into the low half of the output D register,
          // then convert in place.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Computed as double(high) * 2^32 + unsigned-double(low), using two
          // FP temps reserved by the locations builder.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
          DRegister constant_d = FromLowSToD(constant_s);

          // temp_d = int-to-double(high)
          __ vmovsr(temp_s, high);
          __ vcvtdi(temp_d, temp_s);
          // constant_d = k2Pow32EncodingForDouble
          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
          // out_d = unsigned-to-double(low)
          __ vmovsr(out_s, low);
          __ vcvtdu(out_d, out_s);
          // out_d += temp_d * constant_d
          __ vmlad(out_d, temp_d, constant_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1954
1955void LocationsBuilderARM::VisitAdd(HAdd* add) {
1956  LocationSummary* locations =
1957      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1958  switch (add->GetResultType()) {
1959    case Primitive::kPrimInt: {
1960      locations->SetInAt(0, Location::RequiresRegister());
1961      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1962      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1963      break;
1964    }
1965
1966    case Primitive::kPrimLong: {
1967      locations->SetInAt(0, Location::RequiresRegister());
1968      locations->SetInAt(1, Location::RequiresRegister());
1969      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1970      break;
1971    }
1972
1973    case Primitive::kPrimFloat:
1974    case Primitive::kPrimDouble: {
1975      locations->SetInAt(0, Location::RequiresFpuRegister());
1976      locations->SetInAt(1, Location::RequiresFpuRegister());
1977      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1978      break;
1979    }
1980
1981    default:
1982      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1983  }
1984}
1985
1986void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
1987  LocationSummary* locations = add->GetLocations();
1988  Location out = locations->Out();
1989  Location first = locations->InAt(0);
1990  Location second = locations->InAt(1);
1991  switch (add->GetResultType()) {
1992    case Primitive::kPrimInt:
1993      if (second.IsRegister()) {
1994        __ add(out.AsRegister<Register>(),
1995               first.AsRegister<Register>(),
1996               ShifterOperand(second.AsRegister<Register>()));
1997      } else {
1998        __ AddConstant(out.AsRegister<Register>(),
1999                       first.AsRegister<Register>(),
2000                       second.GetConstant()->AsIntConstant()->GetValue());
2001      }
2002      break;
2003
2004    case Primitive::kPrimLong: {
2005      DCHECK(second.IsRegisterPair());
2006      __ adds(out.AsRegisterPairLow<Register>(),
2007              first.AsRegisterPairLow<Register>(),
2008              ShifterOperand(second.AsRegisterPairLow<Register>()));
2009      __ adc(out.AsRegisterPairHigh<Register>(),
2010             first.AsRegisterPairHigh<Register>(),
2011             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2012      break;
2013    }
2014
2015    case Primitive::kPrimFloat:
2016      __ vadds(out.AsFpuRegister<SRegister>(),
2017               first.AsFpuRegister<SRegister>(),
2018               second.AsFpuRegister<SRegister>());
2019      break;
2020
2021    case Primitive::kPrimDouble:
2022      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2023               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2024               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2025      break;
2026
2027    default:
2028      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2029  }
2030}
2031
2032void LocationsBuilderARM::VisitSub(HSub* sub) {
2033  LocationSummary* locations =
2034      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2035  switch (sub->GetResultType()) {
2036    case Primitive::kPrimInt: {
2037      locations->SetInAt(0, Location::RequiresRegister());
2038      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2039      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2040      break;
2041    }
2042
2043    case Primitive::kPrimLong: {
2044      locations->SetInAt(0, Location::RequiresRegister());
2045      locations->SetInAt(1, Location::RequiresRegister());
2046      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2047      break;
2048    }
2049    case Primitive::kPrimFloat:
2050    case Primitive::kPrimDouble: {
2051      locations->SetInAt(0, Location::RequiresFpuRegister());
2052      locations->SetInAt(1, Location::RequiresFpuRegister());
2053      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2054      break;
2055    }
2056    default:
2057      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2058  }
2059}
2060
2061void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2062  LocationSummary* locations = sub->GetLocations();
2063  Location out = locations->Out();
2064  Location first = locations->InAt(0);
2065  Location second = locations->InAt(1);
2066  switch (sub->GetResultType()) {
2067    case Primitive::kPrimInt: {
2068      if (second.IsRegister()) {
2069        __ sub(out.AsRegister<Register>(),
2070               first.AsRegister<Register>(),
2071               ShifterOperand(second.AsRegister<Register>()));
2072      } else {
2073        __ AddConstant(out.AsRegister<Register>(),
2074                       first.AsRegister<Register>(),
2075                       -second.GetConstant()->AsIntConstant()->GetValue());
2076      }
2077      break;
2078    }
2079
2080    case Primitive::kPrimLong: {
2081      DCHECK(second.IsRegisterPair());
2082      __ subs(out.AsRegisterPairLow<Register>(),
2083              first.AsRegisterPairLow<Register>(),
2084              ShifterOperand(second.AsRegisterPairLow<Register>()));
2085      __ sbc(out.AsRegisterPairHigh<Register>(),
2086             first.AsRegisterPairHigh<Register>(),
2087             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2088      break;
2089    }
2090
2091    case Primitive::kPrimFloat: {
2092      __ vsubs(out.AsFpuRegister<SRegister>(),
2093               first.AsFpuRegister<SRegister>(),
2094               second.AsFpuRegister<SRegister>());
2095      break;
2096    }
2097
2098    case Primitive::kPrimDouble: {
2099      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2100               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2101               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2102      break;
2103    }
2104
2105
2106    default:
2107      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2108  }
2109}
2110
2111void LocationsBuilderARM::VisitMul(HMul* mul) {
2112  LocationSummary* locations =
2113      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2114  switch (mul->GetResultType()) {
2115    case Primitive::kPrimInt:
2116    case Primitive::kPrimLong:  {
2117      locations->SetInAt(0, Location::RequiresRegister());
2118      locations->SetInAt(1, Location::RequiresRegister());
2119      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2120      break;
2121    }
2122
2123    case Primitive::kPrimFloat:
2124    case Primitive::kPrimDouble: {
2125      locations->SetInAt(0, Location::RequiresFpuRegister());
2126      locations->SetInAt(1, Location::RequiresFpuRegister());
2127      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2128      break;
2129    }
2130
2131    default:
2132      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2133  }
2134}
2135
// Emits ARM code for an HMul.  Int is a single mul; long uses the schoolbook
// 32x32->64 decomposition below, clobbering IP as a scratch register; float
// and double use the VFP multiply instructions.
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      // The mla below writes out_hi before umull reads in1_lo/in2_lo, so
      // out_hi must not alias either of them.
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2197
2198void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2199  DCHECK(instruction->IsDiv() || instruction->IsRem());
2200  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2201
2202  LocationSummary* locations = instruction->GetLocations();
2203  Location second = locations->InAt(1);
2204  DCHECK(second.IsConstant());
2205
2206  Register out = locations->Out().AsRegister<Register>();
2207  Register dividend = locations->InAt(0).AsRegister<Register>();
2208  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2209  DCHECK(imm == 1 || imm == -1);
2210
2211  if (instruction->IsRem()) {
2212    __ LoadImmediate(out, 0);
2213  } else {
2214    if (imm == 1) {
2215      __ Mov(out, dividend);
2216    } else {
2217      __ rsb(out, dividend, ShifterOperand(0));
2218    }
2219  }
2220}
2221
2222void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2223  DCHECK(instruction->IsDiv() || instruction->IsRem());
2224  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2225
2226  LocationSummary* locations = instruction->GetLocations();
2227  Location second = locations->InAt(1);
2228  DCHECK(second.IsConstant());
2229
2230  Register out = locations->Out().AsRegister<Register>();
2231  Register dividend = locations->InAt(0).AsRegister<Register>();
2232  Register temp = locations->GetTemp(0).AsRegister<Register>();
2233  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2234  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
2235  DCHECK(IsPowerOfTwo(abs_imm));
2236  int ctz_imm = CTZ(abs_imm);
2237
2238  if (ctz_imm == 1) {
2239    __ Lsr(temp, dividend, 32 - ctz_imm);
2240  } else {
2241    __ Asr(temp, dividend, 31);
2242    __ Lsr(temp, temp, 32 - ctz_imm);
2243  }
2244  __ add(out, temp, ShifterOperand(dividend));
2245
2246  if (instruction->IsDiv()) {
2247    __ Asr(out, out, ctz_imm);
2248    if (imm < 0) {
2249      __ rsb(out, out, ShifterOperand(0));
2250    }
2251  } else {
2252    __ ubfx(out, out, 0, ctz_imm);
2253    __ sub(out, out, ShifterOperand(temp));
2254  }
2255}
2256
// Emits code for an int div/rem by an arbitrary constant (not 0, +/-1, or a
// power of two) using the multiply-by-magic-number technique: the quotient is
// recovered from the high word of dividend * magic, plus sign fix-ups.
// Requires two core temps reserved by the locations builder.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1:temp2 (hi:lo) = dividend * magic; only the high word is used below.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high word when the signs of the divisor and the magic
  // constant disagree (standard magic-number division fix-up).
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // Quotient: subtract the sign word (temp1 >> 31 is -1 for negative
    // values) to round toward zero.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // Remainder: dividend - quotient * imm, via multiply-and-subtract.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2297
2298void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2299  DCHECK(instruction->IsDiv() || instruction->IsRem());
2300  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2301
2302  LocationSummary* locations = instruction->GetLocations();
2303  Location second = locations->InAt(1);
2304  DCHECK(second.IsConstant());
2305
2306  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2307  if (imm == 0) {
2308    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2309  } else if (imm == 1 || imm == -1) {
2310    DivRemOneOrMinusOne(instruction);
2311  } else if (IsPowerOfTwo(std::abs(imm))) {
2312    DivRemByPowerOfTwo(instruction);
2313  } else {
2314    DCHECK(imm <= -2 || imm >= 2);
2315    GenerateDivRemWithAnyConstant(instruction);
2316  }
2317}
2318
2319void LocationsBuilderARM::VisitDiv(HDiv* div) {
2320  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2321  if (div->GetResultType() == Primitive::kPrimLong) {
2322    // pLdiv runtime call.
2323    call_kind = LocationSummary::kCall;
2324  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2325    // sdiv will be replaced by other instruction sequence.
2326  } else if (div->GetResultType() == Primitive::kPrimInt &&
2327             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2328    // pIdivmod runtime call.
2329    call_kind = LocationSummary::kCall;
2330  }
2331
2332  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2333
2334  switch (div->GetResultType()) {
2335    case Primitive::kPrimInt: {
2336      if (div->InputAt(1)->IsConstant()) {
2337        locations->SetInAt(0, Location::RequiresRegister());
2338        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2339        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2340        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2341        if (abs_imm <= 1) {
2342          // No temp register required.
2343        } else {
2344          locations->AddTemp(Location::RequiresRegister());
2345          if (!IsPowerOfTwo(abs_imm)) {
2346            locations->AddTemp(Location::RequiresRegister());
2347          }
2348        }
2349      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2350        locations->SetInAt(0, Location::RequiresRegister());
2351        locations->SetInAt(1, Location::RequiresRegister());
2352        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2353      } else {
2354        InvokeRuntimeCallingConvention calling_convention;
2355        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2356        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2357        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2358        //       we only need the former.
2359        locations->SetOut(Location::RegisterLocation(R0));
2360      }
2361      break;
2362    }
2363    case Primitive::kPrimLong: {
2364      InvokeRuntimeCallingConvention calling_convention;
2365      locations->SetInAt(0, Location::RegisterPairLocation(
2366          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2367      locations->SetInAt(1, Location::RegisterPairLocation(
2368          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2369      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2370      break;
2371    }
2372    case Primitive::kPrimFloat:
2373    case Primitive::kPrimDouble: {
2374      locations->SetInAt(0, Location::RequiresFpuRegister());
2375      locations->SetInAt(1, Location::RequiresFpuRegister());
2376      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2377      break;
2378    }
2379
2380    default:
2381      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2382  }
2383}
2384
// Emits code for HDiv. Int division uses (in order of preference) a
// strength-reduced constant sequence, the hardware sdiv, or the pIdivmod
// runtime helper; long division always calls the runtime; float/double use
// the VFP divide instructions.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        // Division by a constant: emit the strength-reduced sequence instead
        // of a divide (see GenerateDivRemConstantIntegral).
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // The CPU has a hardware divide: a single sdiv does the job.
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // No hardware divide: call the pIdivmod runtime helper. The locations
        // builder pinned the inputs to the runtime calling convention and the
        // output (the quotient) to R0; the DCHECKs merely verify that contract.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      // 64-bit division always goes through the pLdiv runtime helper:
      // operands in R0/R1 and R2/R3, result pair in R0/R1 (pinned by the
      // locations builder and checked here).
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      // Single-precision VFP divide.
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Double-precision VFP divide; operands are S-register pairs viewed as
      // D registers.
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2441
2442void LocationsBuilderARM::VisitRem(HRem* rem) {
2443  Primitive::Type type = rem->GetResultType();
2444
2445  // Most remainders are implemented in the runtime.
2446  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2447  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2448    // sdiv will be replaced by other instruction sequence.
2449    call_kind = LocationSummary::kNoCall;
2450  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2451             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2452    // Have hardware divide instruction for int, do it with three instructions.
2453    call_kind = LocationSummary::kNoCall;
2454  }
2455
2456  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2457
2458  switch (type) {
2459    case Primitive::kPrimInt: {
2460      if (rem->InputAt(1)->IsConstant()) {
2461        locations->SetInAt(0, Location::RequiresRegister());
2462        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2463        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2464        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2465        if (abs_imm <= 1) {
2466          // No temp register required.
2467        } else {
2468          locations->AddTemp(Location::RequiresRegister());
2469          if (!IsPowerOfTwo(abs_imm)) {
2470            locations->AddTemp(Location::RequiresRegister());
2471          }
2472        }
2473      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2474        locations->SetInAt(0, Location::RequiresRegister());
2475        locations->SetInAt(1, Location::RequiresRegister());
2476        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2477        locations->AddTemp(Location::RequiresRegister());
2478      } else {
2479        InvokeRuntimeCallingConvention calling_convention;
2480        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2481        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2482        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2483        //       we only need the latter.
2484        locations->SetOut(Location::RegisterLocation(R1));
2485      }
2486      break;
2487    }
2488    case Primitive::kPrimLong: {
2489      InvokeRuntimeCallingConvention calling_convention;
2490      locations->SetInAt(0, Location::RegisterPairLocation(
2491          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2492      locations->SetInAt(1, Location::RegisterPairLocation(
2493          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2494      // The runtime helper puts the output in R2,R3.
2495      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2496      break;
2497    }
2498    case Primitive::kPrimFloat: {
2499      InvokeRuntimeCallingConvention calling_convention;
2500      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2501      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2502      locations->SetOut(Location::FpuRegisterLocation(S0));
2503      break;
2504    }
2505
2506    case Primitive::kPrimDouble: {
2507      InvokeRuntimeCallingConvention calling_convention;
2508      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2509          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2510      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2511          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2512      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2513      break;
2514    }
2515
2516    default:
2517      LOG(FATAL) << "Unexpected rem type " << type;
2518  }
2519}
2520
2521void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2522  LocationSummary* locations = rem->GetLocations();
2523  Location out = locations->Out();
2524  Location first = locations->InAt(0);
2525  Location second = locations->InAt(1);
2526
2527  Primitive::Type type = rem->GetResultType();
2528  switch (type) {
2529    case Primitive::kPrimInt: {
2530        if (second.IsConstant()) {
2531          GenerateDivRemConstantIntegral(rem);
2532        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2533        Register reg1 = first.AsRegister<Register>();
2534        Register reg2 = second.AsRegister<Register>();
2535        Register temp = locations->GetTemp(0).AsRegister<Register>();
2536
2537        // temp = reg1 / reg2  (integer division)
2538        // temp = temp * reg2
2539        // dest = reg1 - temp
2540        __ sdiv(temp, reg1, reg2);
2541        __ mul(temp, temp, reg2);
2542        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2543      } else {
2544        InvokeRuntimeCallingConvention calling_convention;
2545        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2546        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2547        DCHECK_EQ(R1, out.AsRegister<Register>());
2548
2549        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2550      }
2551      break;
2552    }
2553
2554    case Primitive::kPrimLong: {
2555      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2556      break;
2557    }
2558
2559    case Primitive::kPrimFloat: {
2560      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2561      break;
2562    }
2563
2564    case Primitive::kPrimDouble: {
2565      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2566      break;
2567    }
2568
2569    default:
2570      LOG(FATAL) << "Unexpected rem type " << type;
2571  }
2572}
2573
// Register allocation constraints for HDivZeroCheck: the divisor may stay a
// constant (a non-zero constant emits no code at all in the codegen visitor).
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    // The check forwards its input as its result when it has users.
    locations->SetOut(Location::SameAsFirstInput());
  }
}
2582
2583void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2584  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2585  codegen_->AddSlowPath(slow_path);
2586
2587  LocationSummary* locations = instruction->GetLocations();
2588  Location value = locations->InAt(0);
2589
2590  switch (instruction->GetType()) {
2591    case Primitive::kPrimInt: {
2592      if (value.IsRegister()) {
2593        __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
2594      } else {
2595        DCHECK(value.IsConstant()) << value;
2596        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2597          __ b(slow_path->GetEntryLabel());
2598        }
2599      }
2600      break;
2601    }
2602    case Primitive::kPrimLong: {
2603      if (value.IsRegisterPair()) {
2604        __ orrs(IP,
2605                value.AsRegisterPairLow<Register>(),
2606                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2607        __ b(slow_path->GetEntryLabel(), EQ);
2608      } else {
2609        DCHECK(value.IsConstant()) << value;
2610        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2611          __ b(slow_path->GetEntryLabel());
2612        }
2613      }
2614      break;
2615    default:
2616      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2617    }
2618  }
2619}
2620
// Shared register allocation constraints for Shl/Shr/UShr.
void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);

  switch (op->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
      // Make the output overlap, as it will be used to hold the masked
      // second input.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      // Long shifts need one temp for the cross-word carry computation.
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->AddTemp(Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
  }
}
2647
// Shared code emission for Shl/Shr/UShr. Int shifts mask the count manually
// (ARM register shifts don't); long shifts synthesize a 64-bit shift out of
// 32-bit shifts, an `orr` of the bits crossing the word boundary, and a
// conditionally-executed override for counts >= 32.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // Mask the count into the (overlapping) output register, then shift.
        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, out_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, out_reg);
        } else {
          __ Lsr(out_reg, first_reg, out_reg);
        }
      } else {
        // Constant shift amount, masked to the valid range at compile time.
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // The masked count lives in o_l until the final low-part shift.
        __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the high part
        __ Lsl(o_h, high, o_l);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, o_l);
      } else if (op->IsShr()) {
        // The masked count lives in o_h until the final high-part shift.
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the low part
        __ Lsr(o_l, low, o_h);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, o_h);
      } else {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ Lsr(o_l, low, o_h);
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, o_h);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2744
// Shl shares its register constraints with Shr/UShr; see HandleShift.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2748
// Shl shares its code emission with Shr/UShr; see HandleShift.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2752
// Shr shares its register constraints with Shl/UShr; see HandleShift.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2756
// Shr shares its code emission with Shl/UShr; see HandleShift.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2760
// UShr shares its register constraints with Shl/Shr; see HandleShift.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2764
// UShr shares its code emission with Shl/Shr; see HandleShift.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2768
2769void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
2770  LocationSummary* locations =
2771      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2772  InvokeRuntimeCallingConvention calling_convention;
2773  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2774  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2775  locations->SetOut(Location::RegisterLocation(R0));
2776}
2777
// Loads the type index into the first argument register, then calls the
// instruction's allocation entrypoint (the new object comes back in R0, as
// pinned by the locations builder).
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2786
2787void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
2788  LocationSummary* locations =
2789      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2790  InvokeRuntimeCallingConvention calling_convention;
2791  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2792  locations->SetOut(Location::RegisterLocation(R0));
2793  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2794  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2795}
2796
// Loads the type index into the first argument register, then calls the
// instruction's allocation entrypoint (the new array comes back in R0, as
// pinned by the locations builder).
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
2805
// Records where each incoming parameter lives. Stack-passed parameters sit in
// the caller's frame, so their caller-relative offsets are rebased by this
// method's frame size.
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  // parameter_visitor_ is stateful: successive calls walk the calling
  // convention, so parameters must be visited in declaration order.
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}
2817
// Parameters generate no code: the locations builder already recorded where
// the caller left the value.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
2822
// The current ArtMethod* lives in the fixed method register (R0 on ARM).
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
2828
// No code: the method pointer is already in its fixed register.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
2832
// Register allocation constraints for bitwise NOT (int or long).
void LocationsBuilderARM::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
2839
// Emits bitwise NOT: a single mvn for ints, one mvn per half for longs.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      // Complement each half of the pair independently.
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
2860
// Register allocation constraints for boolean negation.
void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
2867
// Emits boolean negation as XOR with 1, flipping the low bit.
// NOTE(review): this only yields 0/1 output if the input is already a
// canonical 0/1 boolean — presumably guaranteed upstream; confirm.
void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}
2874
// Register allocation constraints for HCompare (long, float or double
// operands; the int result always needs a core register).
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}
2897
// Materializes a three-way comparison into `out`: -1 (less), 0 (equal),
// 1 (greater). Longs compare the high words signed first, then the low words
// unsigned; floats/doubles use vcmp with the instruction's bias deciding the
// result for unordered (NaN) operands.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // Unordered (NaN) goes to greater or less depending on the bias.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Common tail: dispatch on the flags left by the final compare above.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2946
2947void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2948  LocationSummary* locations =
2949      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2950  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2951    locations->SetInAt(i, Location::Any());
2952  }
2953  locations->SetOut(Location::Any());
2954}
2955
// Phis generate no code; reaching this visitor is a compiler bug.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
2960
// Emits a DMB memory barrier for the requested kind: the store-store kind
// maps to the lighter ISHST option, everything else to a full ISH barrier.
void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
  // TODO (ported from quick): revisit Arm barrier kinds
  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
  switch (kind) {
    case MemBarrierKind::kAnyStore:
    case MemBarrierKind::kLoadAny:
    case MemBarrierKind::kAnyAny: {
      flavor = DmbOptions::ISH;
      break;
    }
    case MemBarrierKind::kStoreStore: {
      flavor = DmbOptions::ISHST;
      break;
    }
    default:
      LOG(FATAL) << "Unexpected memory barrier " << kind;
  }
  __ dmb(flavor);
}
2980
// Emits an atomic 64-bit load of [addr + offset] into out_lo/out_hi via
// ldrexd. out_lo doubles as a scratch register to materialize a non-zero
// offset (it is overwritten by the load anyway); IP holds the effective
// address in that case.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2992
// Emits an atomic 64-bit store of value_lo/value_hi to [addr + offset] using
// an ldrexd/strexd retry loop. temp1 is a scratch for the offset and receives
// the strexd status (0 = success); temp2 receives the high half of the
// discarded exclusive load. Loops until the exclusive store succeeds.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  // Retry while the exclusive store reports failure (non-zero status).
  __ CompareAndBranchIfNonZero(temp1, &fail);
}
3014
// Register allocation constraints shared by instance/static field stores.
// Temps are added for the write barrier (reference stores) or for the
// ldrexd/strexd sequence (wide volatile stores without atomic ldrd/strd).
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  // A 64-bit volatile store only needs the exclusive-load/store loop when the
  // CPU lacks single-copy-atomic ldrd/strd.
  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3055
// Code emission shared by instance/static field stores. Volatile stores are
// bracketed by AnyStore/AnyAny barriers; wide volatile stores on CPUs without
// atomic ldrd/strd go through the ldrexd/strexd loop; reference stores mark
// the GC card afterwards.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    // Order all prior accesses before the volatile store.
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single-copy-atomic strd: use the exclusive store loop.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Copy the double into the two core temps so the exclusive store loop
        // can handle it.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    // Mark the GC card for the object holding the stored reference.
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    // Order the volatile store before all subsequent accesses.
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3155
3156void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
3157  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3158  LocationSummary* locations =
3159      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3160  locations->SetInAt(0, Location::RequiresRegister());
3161
3162  bool volatile_for_double = field_info.IsVolatile()
3163      && (field_info.GetFieldType() == Primitive::kPrimDouble)
3164      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3165  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);
3166
3167  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3168    locations->SetOut(Location::RequiresFpuRegister());
3169  } else {
3170    locations->SetOut(Location::RequiresRegister(),
3171                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
3172  }
3173  if (volatile_for_double) {
3174    // Arm encoding have some additional constraints for ldrexd/strexd:
3175    // - registers need to be consecutive
3176    // - the first register should be even but not R14.
3177    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3178    // enable Arm encoding.
3179    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3180    locations->AddTemp(Location::RequiresRegister());
3181    locations->AddTemp(Location::RequiresRegister());
3182  }
3183}
3184
// Emits the load for an instance or static field get. `base` holds the object
// (or, for statics, the class/holder passed as input 0) and the field lives at
// a fixed `offset` from it. The load doubles as the implicit null check, so the
// PC of the emitted load is recorded right after emission.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  // When the core guarantees atomic ldrd/strd, a volatile 64-bit value can use
  // the plain pair load; otherwise an exclusive (ldrexd) sequence is needed.
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      // Java char is unsigned 16-bit, hence the zero-extending load.
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load through two core temps with the exclusive sequence, record the
        // null check at the load, then move the pair into the FP register.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  // Volatile reads are followed by a load-any barrier.
  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3269
3270void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3271  HandleFieldSet(instruction, instruction->GetFieldInfo());
3272}
3273
3274void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3275  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3276}
3277
3278void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3279  HandleFieldGet(instruction, instruction->GetFieldInfo());
3280}
3281
3282void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3283  HandleFieldGet(instruction, instruction->GetFieldInfo());
3284}
3285
3286void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3287  HandleFieldGet(instruction, instruction->GetFieldInfo());
3288}
3289
3290void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3291  HandleFieldGet(instruction, instruction->GetFieldInfo());
3292}
3293
3294void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3295  HandleFieldSet(instruction, instruction->GetFieldInfo());
3296}
3297
3298void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3299  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3300}
3301
3302void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
3303  LocationSummary* locations =
3304      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3305  locations->SetInAt(0, Location::RequiresRegister());
3306  if (instruction->HasUses()) {
3307    locations->SetOut(Location::SameAsFirstInput());
3308  }
3309}
3310
// Implicit null check: rely on the hardware fault taken when dereferencing a
// null pointer instead of emitting an explicit compare-and-branch.
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  // If a following user instruction can fault in place of this check, emit nothing.
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  // Dereference the object into the scratch register IP; a null reference
  // faults here. Record the PC immediately after the load so the runtime can
  // attribute a fault at that address to this instruction's dex PC.
  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3320
3321void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3322  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3323  codegen_->AddSlowPath(slow_path);
3324
3325  LocationSummary* locations = instruction->GetLocations();
3326  Location obj = locations->InAt(0);
3327
3328  __ CompareAndBranchIfZero(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
3329}
3330
3331void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3332  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3333    GenerateImplicitNullCheck(instruction);
3334  } else {
3335    GenerateExplicitNullCheck(instruction);
3336  }
3337}
3338
3339void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3340  LocationSummary* locations =
3341      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3342  locations->SetInAt(0, Location::RequiresRegister());
3343  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3344  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3345    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3346  } else {
3347    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3348  }
3349}
3350
// Emits the load for an array element. With a constant index the element
// address is a fixed offset from the array; otherwise the scaled index is
// added into the scratch register IP first. The load itself serves as the
// implicit null check recorded at the end of the function.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        // Scale the index by the element size via the shifter operand.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      // Java char is unsigned 16-bit, hence the zero-extending load.
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References share the int path because heap references are 32-bit.
      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
                    "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // Record the implicit null check at the load emitted above.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3478
3479void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3480  Primitive::Type value_type = instruction->GetComponentType();
3481
3482  bool needs_write_barrier =
3483      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3484  bool needs_runtime_call = instruction->NeedsTypeCheck();
3485
3486  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3487      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3488  if (needs_runtime_call) {
3489    InvokeRuntimeCallingConvention calling_convention;
3490    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3491    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3492    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3493  } else {
3494    locations->SetInAt(0, Location::RequiresRegister());
3495    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3496    if (Primitive::IsFloatingPointType(value_type)) {
3497      locations->SetInAt(2, Location::RequiresFpuRegister());
3498    } else {
3499      locations->SetInAt(2, Location::RequiresRegister());
3500    }
3501
3502    if (needs_write_barrier) {
3503      // Temporary registers for the write barrier.
3504      locations->AddTemp(Location::RequiresRegister());
3505      locations->AddTemp(Location::RequiresRegister());
3506    }
3507  }
3508}
3509
// Emits the store for an array element. Primitive stores are emitted inline
// (with IP used as the scratch address register for non-constant indices);
// reference stores that need a type check call the pAputObject runtime entry
// point, and checked-free reference stores mark the GC card inline.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  // WillCall() mirrors the NeedsTypeCheck() decision made in the builder.
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        // Scale the index by the element size via the shifter operand.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // Record the null check here, at the store, rather than in the common
        // tail, because the card marking below emits more instructions.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // Type-checked reference store: delegate entirely to the runtime.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3634
3635void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
3636  LocationSummary* locations =
3637      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3638  locations->SetInAt(0, Location::RequiresRegister());
3639  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3640}
3641
3642void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3643  LocationSummary* locations = instruction->GetLocations();
3644  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3645  Register obj = locations->InAt(0).AsRegister<Register>();
3646  Register out = locations->Out().AsRegister<Register>();
3647  __ LoadFromOffset(kLoadWord, out, obj, offset);
3648  codegen_->MaybeRecordImplicitNullCheck(instruction);
3649}
3650
3651void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3652  LocationSummary* locations =
3653      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3654  locations->SetInAt(0, Location::RequiresRegister());
3655  locations->SetInAt(1, Location::RequiresRegister());
3656  if (instruction->HasUses()) {
3657    locations->SetOut(Location::SameAsFirstInput());
3658  }
3659}
3660
3661void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3662  LocationSummary* locations = instruction->GetLocations();
3663  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3664      instruction, locations->InAt(0), locations->InAt(1));
3665  codegen_->AddSlowPath(slow_path);
3666
3667  Register index = locations->InAt(0).AsRegister<Register>();
3668  Register length = locations->InAt(1).AsRegister<Register>();
3669
3670  __ cmp(index, ShifterOperand(length));
3671  __ b(slow_path->GetEntryLabel(), CS);
3672}
3673
// Marks the card covering `object` after a reference store, so the GC knows
// this region may contain pointers into other spaces. `temp` and `card` are
// scratch registers; `value` is the reference that was just stored.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  Label is_null;
  if (can_be_null) {
    // Storing null creates no cross-space reference; skip the card mark.
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // Load the thread-local card table base.
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  // Compute the card index for `object`.
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Dirty the card: the byte written is the low byte of the table base held in
  // `card` — presumably the card table defines its base so that this value
  // means "dirty" (see gc/accounting/card_table for confirmation).
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3690
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // Temporaries get no location summary; their handling is driven by the
  // code generator (see the codegen VisitTemporary).
  temp->SetLocations(nullptr);
}
3694
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator; no code is emitted
  // for the temporary itself.
  UNUSED(temp);
}
3699
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  // Parallel moves never go through the locations builder; reaching this
  // visitor indicates a compiler bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3704
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Delegate to the parallel move resolver, which emits the actual
  // register/stack shuffling code.
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3708
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // No inputs, outputs, or temps; the only call happens on the slow path.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3712
3713void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3714  HBasicBlock* block = instruction->GetBlock();
3715  if (block->GetLoopInformation() != nullptr) {
3716    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3717    // The back edge will generate the suspend check.
3718    return;
3719  }
3720  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3721    // The goto will generate the suspend check.
3722    return;
3723  }
3724  GenerateSuspendCheck(instruction, nullptr);
3725}
3726
// Emits a suspend check. With a null `successor` the check falls through when
// no suspension is requested; with a successor (a loop header, at a back edge)
// it branches to the successor when nothing is pending.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // Reuse an already-created slow path for this instruction, if any.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    // A cached slow path must have been created for the same successor.
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Test the thread's flags halfword; nonzero means a request is pending.
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  if (successor == nullptr) {
    // Fall-through form: detour into the slow path and come back here.
    __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    // Back-edge form: jump to the successor when nothing is pending,
    // otherwise enter the slow path.
    __ CompareAndBranchIfZero(IP, codegen_->GetLabelOf(successor));
    __ b(slow_path->GetEntryLabel());
  }
}
3753
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  // The move resolver emits into the code generator's assembler.
  return codegen_->GetAssembler();
}
3757
3758void ParallelMoveResolverARM::EmitMove(size_t index) {
3759  MoveOperands* move = moves_.Get(index);
3760  Location source = move->GetSource();
3761  Location destination = move->GetDestination();
3762
3763  if (source.IsRegister()) {
3764    if (destination.IsRegister()) {
3765      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
3766    } else {
3767      DCHECK(destination.IsStackSlot());
3768      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
3769                       SP, destination.GetStackIndex());
3770    }
3771  } else if (source.IsStackSlot()) {
3772    if (destination.IsRegister()) {
3773      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
3774                        SP, source.GetStackIndex());
3775    } else if (destination.IsFpuRegister()) {
3776      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
3777    } else {
3778      DCHECK(destination.IsStackSlot());
3779      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
3780      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3781    }
3782  } else if (source.IsFpuRegister()) {
3783    if (destination.IsFpuRegister()) {
3784      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
3785    } else {
3786      DCHECK(destination.IsStackSlot());
3787      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
3788    }
3789  } else if (source.IsDoubleStackSlot()) {
3790    if (destination.IsDoubleStackSlot()) {
3791      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
3792      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
3793    } else if (destination.IsRegisterPair()) {
3794      DCHECK(ExpectedPairLayout(destination));
3795      __ LoadFromOffset(
3796          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
3797    } else {
3798      DCHECK(destination.IsFpuRegisterPair()) << destination;
3799      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
3800                         SP,
3801                         source.GetStackIndex());
3802    }
3803  } else if (source.IsRegisterPair()) {
3804    if (destination.IsRegisterPair()) {
3805      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
3806      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
3807    } else {
3808      DCHECK(destination.IsDoubleStackSlot()) << destination;
3809      DCHECK(ExpectedPairLayout(source));
3810      __ StoreToOffset(
3811          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
3812    }
3813  } else if (source.IsFpuRegisterPair()) {
3814    if (destination.IsFpuRegisterPair()) {
3815      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
3816               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
3817    } else {
3818      DCHECK(destination.IsDoubleStackSlot()) << destination;
3819      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
3820                        SP,
3821                        destination.GetStackIndex());
3822    }
3823  } else {
3824    DCHECK(source.IsConstant()) << source;
3825    HConstant* constant = source.GetConstant();
3826    if (constant->IsIntConstant() || constant->IsNullConstant()) {
3827      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
3828      if (destination.IsRegister()) {
3829        __ LoadImmediate(destination.AsRegister<Register>(), value);
3830      } else {
3831        DCHECK(destination.IsStackSlot());
3832        __ LoadImmediate(IP, value);
3833        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3834      }
3835    } else if (constant->IsLongConstant()) {
3836      int64_t value = constant->AsLongConstant()->GetValue();
3837      if (destination.IsRegisterPair()) {
3838        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
3839        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
3840      } else {
3841        DCHECK(destination.IsDoubleStackSlot()) << destination;
3842        __ LoadImmediate(IP, Low32Bits(value));
3843        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3844        __ LoadImmediate(IP, High32Bits(value));
3845        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
3846      }
3847    } else if (constant->IsDoubleConstant()) {
3848      double value = constant->AsDoubleConstant()->GetValue();
3849      if (destination.IsFpuRegisterPair()) {
3850        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
3851      } else {
3852        DCHECK(destination.IsDoubleStackSlot()) << destination;
3853        uint64_t int_value = bit_cast<uint64_t, double>(value);
3854        __ LoadImmediate(IP, Low32Bits(int_value));
3855        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3856        __ LoadImmediate(IP, High32Bits(int_value));
3857        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
3858      }
3859    } else {
3860      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
3861      float value = constant->AsFloatConstant()->GetValue();
3862      if (destination.IsFpuRegister()) {
3863        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
3864      } else {
3865        DCHECK(destination.IsStackSlot());
3866        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
3867        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3868      }
3869    }
3870  }
3871}
3872
// Emits code swapping the contents of core register `reg` with the stack
// slot at SP + `mem`, using IP as the scratch register.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
3878
// Emits code swapping the two stack slots at SP + `mem1` and SP + `mem2`.
// Uses IP plus one allocated scratch core register; if that scratch had to be
// spilled (SpillScratch pushes it, moving SP down one word), all offsets are
// rebased by kArmWordSize.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  // scratch = [mem1], IP = [mem2], then store the two back crosswise.
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
3889
// Emits code swapping the values held by the source and destination of the
// move at `index`. IP is the core scratch register and DTMP the FP scratch
// D-register for the 64-bit cases.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core <-> core: classic three-mov swap through IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // S-reg <-> S-reg: swap through the core scratch IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Pair <-> pair: park the source pair in DTMP while the destination pair
    // is copied over, then unpack DTMP into the destination registers.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    // ExpectedPairLayout guarantees high == low + 1, which kLoadWordPair
    // (LDRD) and the vmovdrr packing below rely on.
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // D-reg <-> D-reg: three-mov swap through DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // D-reg <-> double stack slot, via DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // S-reg <-> stack slot, via IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot: swap word by word.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3962
// Spills the scratch core register `reg` by pushing it onto the stack.
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
3966
// Restores the scratch core register `reg` by popping it from the stack.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
3970
3971void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3972  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3973      ? LocationSummary::kCallOnSlowPath
3974      : LocationSummary::kNoCall;
3975  LocationSummary* locations =
3976      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3977  locations->SetInAt(0, Location::RequiresRegister());
3978  locations->SetOut(Location::RequiresRegister());
3979}
3980
// Materializes the class referenced by `cls` into the output register.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // The class is the one declaring the current method: read it straight
    // off the ArtMethod; no runtime call can be needed.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index]
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    // A null cache entry means the class is unresolved: take the slow path.
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      // The initialization check binds the slow path's exit label itself.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
4009
4010void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4011  LocationSummary* locations =
4012      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4013  locations->SetInAt(0, Location::RequiresRegister());
4014  if (check->HasUses()) {
4015    locations->SetOut(Location::SameAsFirstInput());
4016  }
4017}
4018
// Emits the class-initialization check for `check`, with a slow path that
// loads/initializes the class when it is not yet initialized.
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}
4027
// Emits a check that the class in `class_reg` is initialized, branching to
// `slow_path` if not. Binds the slow path's exit label at the end.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  // Any status below kStatusInitialized means not (fully) initialized.
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4038
4039void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4040  LocationSummary* locations =
4041      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4042  locations->SetInAt(0, Location::RequiresRegister());
4043  locations->SetOut(Location::RequiresRegister());
4044}
4045
// Loads the string at the instruction's string index into the output
// register, via the declaring class's dex-cache strings array.
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_->dex_cache_strings_[string_index]
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // A null entry means the string is unresolved: take the slow path.
  __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}
4060
4061void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4062  LocationSummary* locations =
4063      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4064  locations->SetOut(Location::RequiresRegister());
4065}
4066
// Moves the pending exception from the current Thread (TR) into the output
// register, then clears it on the thread by storing null.
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  // Clear the exception field so it is not delivered twice.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
4074
4075void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4076  LocationSummary* locations =
4077      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4078  InvokeRuntimeCallingConvention calling_convention;
4079  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4080}
4081
// Delivers the exception (already placed in the runtime calling convention's
// first argument register) via the pDeliverException entry point.
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4086
4087void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4088  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4089      ? LocationSummary::kNoCall
4090      : LocationSummary::kCallOnSlowPath;
4091  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4092  locations->SetInAt(0, Location::RequiresRegister());
4093  locations->SetInAt(1, Location::RequiresRegister());
4094  // The out register is used as a temporary, so it overlaps with the inputs.
4095  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4096}
4097
// Emits `obj instanceof cls`, producing 1 or 0 in the output register.
// `out` doubles as the temporary holding obj's class during the compare.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Label done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // Only emit the `zero` target when some path above can branch to it.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4141
4142void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4143  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4144      instruction, LocationSummary::kCallOnSlowPath);
4145  locations->SetInAt(0, Location::RequiresRegister());
4146  locations->SetInAt(1, Location::RequiresRegister());
4147  locations->AddTemp(Location::RequiresRegister());
4148}
4149
// Emits a checked cast of `obj` to `cls`: a fast same-class compare with a
// slow path for the full check. Null objects pass (branch to the exit label).
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel());
  }
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  // Classes differ: let the slow path decide whether the cast is legal.
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
}
4171
4172void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4173  LocationSummary* locations =
4174      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4175  InvokeRuntimeCallingConvention calling_convention;
4176  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4177}
4178
4179void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4180  codegen_->InvokeRuntime(instruction->IsEnter()
4181        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4182      instruction,
4183      instruction->GetDexPc(),
4184      nullptr);
4185}
4186
// And/Or/Xor share identical register requirements; delegate to the helper.
void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4190
4191void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4192  LocationSummary* locations =
4193      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4194  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4195         || instruction->GetResultType() == Primitive::kPrimLong);
4196  locations->SetInAt(0, Location::RequiresRegister());
4197  locations->SetInAt(1, Location::RequiresRegister());
4198  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4199}
4200
// And/Or/Xor share one code-generation routine; delegate to the helper.
void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  HandleBitwiseOperation(instruction);
}
4212
// Emits the and/or/xor. Ints need a single ALU instruction; longs operate on
// the low and high halves of the register pairs independently (bitwise ops
// have no carries between words).
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations = instruction->GetLocations();

  if (instruction->GetResultType() == Primitive::kPrimInt) {
    Register first = locations->InAt(0).AsRegister<Register>();
    Register second = locations->InAt(1).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();
    if (instruction->IsAnd()) {
      __ and_(out, first, ShifterOperand(second));
    } else if (instruction->IsOr()) {
      __ orr(out, first, ShifterOperand(second));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out, first, ShifterOperand(second));
    }
  } else {
    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
    Location first = locations->InAt(0);
    Location second = locations->InAt(1);
    Location out = locations->Out();
    if (instruction->IsAnd()) {
      __ and_(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ and_(out.AsRegisterPairHigh<Register>(),
              first.AsRegisterPairHigh<Register>(),
              ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else if (instruction->IsOr()) {
      __ orr(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ orr(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ eor(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    }
  }
}
4258
// Emits a static or direct call for `invoke`, using `temp` as the scratch
// register for the callee ArtMethod and LR for the code entry point.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    Register reg = temp.AsRegister<Register>();
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, reg, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else if (invoke->IsRecursive()) {
    // A call to the method being compiled: branch straight to its frame entry.
    __ bl(GetFrameEntryLabel());
  } else {
    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
    Register method_reg;
    Register reg = temp.AsRegister<Register>();
    if (current_method.IsRegister()) {
      method_reg = current_method.AsRegister<Register>();
    } else {
      // Intrinsified invokes do not carry the current method as an input;
      // reload it from its stack slot instead.
      DCHECK(invoke->GetLocations()->Intrinsified());
      DCHECK(!current_method.IsValid());
      method_reg = reg;
      __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
    }
    // reg = current_method->dex_cache_resolved_methods_;
    __ LoadFromOffset(
        kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
    // reg = reg[index_in_cache]
    __ LoadFromOffset(
        kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
    // LR = reg[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  }

  DCHECK(!IsLeafMethod());
}
4306
// HBoundType never survives to code generation.
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4312
// HBoundType never survives to code generation.
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4318
4319}  // namespace arm
4320}  // namespace art
4321