code_generator_arm.cc revision fc6a86ab2b70781e72b807c1798b83829ca7f931
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "gc/accounting/card_table.h"
24#include "intrinsics.h"
25#include "intrinsics_arm.h"
26#include "mirror/array-inl.h"
27#include "mirror/class-inl.h"
28#include "thread.h"
29#include "utils/arm/assembler_arm.h"
30#include "utils/arm/managed_register_arm.h"
31#include "utils/assembler.h"
32#include "utils/stack_checks.h"
33
34namespace art {
35
36namespace arm {
37
38static bool ExpectedPairLayout(Location location) {
39  // We expected this for both core and fpu register pairs.
40  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
41}
42
// The current ArtMethod* is stored at SP + 0 (see GenerateFrameEntry).
static constexpr int kCurrentMethodStackOffset = 0;
// Register in which the ArtMethod* is passed on entry.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// PC stands in for LR in this list to mimic Quick's frame layout: LR is
// pushed on entry and PC is popped on exit (see GenerateFrameEntry/Exit).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, PC };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// Inside the slow-path classes below, `__` emits through the `codegen`
// parameter of EmitNativeCode; it is redefined further down to use the
// member assembler instead.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
60
61class NullCheckSlowPathARM : public SlowPathCodeARM {
62 public:
63  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
64
65  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
66    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
67    __ Bind(GetEntryLabel());
68    arm_codegen->InvokeRuntime(
69        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
70  }
71
72  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
73
74 private:
75  HNullCheck* const instruction_;
76  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
77};
78
79class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
80 public:
81  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
82
83  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
84    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
85    __ Bind(GetEntryLabel());
86    arm_codegen->InvokeRuntime(
87        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
88  }
89
90  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
91
92 private:
93  HDivZeroCheck* const instruction_;
94  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
95};
96
97class SuspendCheckSlowPathARM : public SlowPathCodeARM {
98 public:
99  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
100      : instruction_(instruction), successor_(successor) {}
101
102  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
103    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
104    __ Bind(GetEntryLabel());
105    SaveLiveRegisters(codegen, instruction_->GetLocations());
106    arm_codegen->InvokeRuntime(
107        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
108    RestoreLiveRegisters(codegen, instruction_->GetLocations());
109    if (successor_ == nullptr) {
110      __ b(GetReturnLabel());
111    } else {
112      __ b(arm_codegen->GetLabelOf(successor_));
113    }
114  }
115
116  Label* GetReturnLabel() {
117    DCHECK(successor_ == nullptr);
118    return &return_label_;
119  }
120
121  HBasicBlock* GetSuccessor() const {
122    return successor_;
123  }
124
125  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }
126
127 private:
128  HSuspendCheck* const instruction_;
129  // If not null, the block to branch to after the suspend check.
130  HBasicBlock* const successor_;
131
132  // If `successor_` is null, the label to branch to after the suspend check.
133  Label return_label_;
134
135  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
136};
137
138class BoundsCheckSlowPathARM : public SlowPathCodeARM {
139 public:
140  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
141                         Location index_location,
142                         Location length_location)
143      : instruction_(instruction),
144        index_location_(index_location),
145        length_location_(length_location) {}
146
147  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
148    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
149    __ Bind(GetEntryLabel());
150    // We're moving two locations to locations that could overlap, so we need a parallel
151    // move resolver.
152    InvokeRuntimeCallingConvention calling_convention;
153    codegen->EmitParallelMoves(
154        index_location_,
155        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
156        Primitive::kPrimInt,
157        length_location_,
158        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
159        Primitive::kPrimInt);
160    arm_codegen->InvokeRuntime(
161        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
162  }
163
164  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }
165
166 private:
167  HBoundsCheck* const instruction_;
168  const Location index_location_;
169  const Location length_location_;
170
171  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
172};
173
class LoadClassSlowPathARM : public SlowPathCodeARM {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  // Resolves (and, if `do_clinit_`, initializes) the class through the
  // runtime: passes the type index in the first argument register, calls
  // pInitializeStaticStorage or pInitializeType, then moves the class from
  // R0 to the instruction's output location, if it has one.
  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      // The output register must not be clobbered by the restore below.
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
226
227class LoadStringSlowPathARM : public SlowPathCodeARM {
228 public:
229  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
230
231  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
232    LocationSummary* locations = instruction_->GetLocations();
233    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
234
235    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
236    __ Bind(GetEntryLabel());
237    SaveLiveRegisters(codegen, locations);
238
239    InvokeRuntimeCallingConvention calling_convention;
240    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
241    arm_codegen->InvokeRuntime(
242        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
243    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
244
245    RestoreLiveRegisters(codegen, locations);
246    __ b(GetExitLabel());
247  }
248
249  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }
250
251 private:
252  HLoadString* const instruction_;
253
254  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
255};
256
257class TypeCheckSlowPathARM : public SlowPathCodeARM {
258 public:
259  TypeCheckSlowPathARM(HInstruction* instruction,
260                       Location class_to_check,
261                       Location object_class,
262                       uint32_t dex_pc)
263      : instruction_(instruction),
264        class_to_check_(class_to_check),
265        object_class_(object_class),
266        dex_pc_(dex_pc) {}
267
268  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
269    LocationSummary* locations = instruction_->GetLocations();
270    DCHECK(instruction_->IsCheckCast()
271           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
272
273    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
274    __ Bind(GetEntryLabel());
275    SaveLiveRegisters(codegen, locations);
276
277    // We're moving two locations to locations that could overlap, so we need a parallel
278    // move resolver.
279    InvokeRuntimeCallingConvention calling_convention;
280    codegen->EmitParallelMoves(
281        class_to_check_,
282        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
283        Primitive::kPrimNot,
284        object_class_,
285        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
286        Primitive::kPrimNot);
287
288    if (instruction_->IsInstanceOf()) {
289      arm_codegen->InvokeRuntime(
290          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
291      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
292    } else {
293      DCHECK(instruction_->IsCheckCast());
294      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
295    }
296
297    RestoreLiveRegisters(codegen, locations);
298    __ b(GetExitLabel());
299  }
300
301  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }
302
303 private:
304  HInstruction* const instruction_;
305  const Location class_to_check_;
306  const Location object_class_;
307  uint32_t dex_pc_;
308
309  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
310};
311
312class DeoptimizationSlowPathARM : public SlowPathCodeARM {
313 public:
314  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
315    : instruction_(instruction) {}
316
317  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
318    __ Bind(GetEntryLabel());
319    SaveLiveRegisters(codegen, instruction_->GetLocations());
320    DCHECK(instruction_->IsDeoptimize());
321    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
322    uint32_t dex_pc = deoptimize->GetDexPc();
323    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
324    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
325  }
326
327  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
328
329 private:
330  HInstruction* const instruction_;
331  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
332};
333
334#undef __
335
336#undef __
337#define __ down_cast<ArmAssembler*>(GetAssembler())->
338
339inline Condition ARMCondition(IfCondition cond) {
340  switch (cond) {
341    case kCondEQ: return EQ;
342    case kCondNE: return NE;
343    case kCondLT: return LT;
344    case kCondLE: return LE;
345    case kCondGT: return GT;
346    case kCondGE: return GE;
347    default:
348      LOG(FATAL) << "Unknown if condition";
349  }
350  return EQ;        // Unreachable.
351}
352
353inline Condition ARMOppositeCondition(IfCondition cond) {
354  switch (cond) {
355    case kCondEQ: return NE;
356    case kCondNE: return EQ;
357    case kCondLT: return GE;
358    case kCondLE: return GT;
359    case kCondGT: return LE;
360    case kCondGE: return LT;
361    default:
362      LOG(FATAL) << "Unknown if condition";
363  }
364  return EQ;        // Unreachable.
365}
366
367void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
368  stream << Register(reg);
369}
370
371void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
372  stream << SRegister(reg);
373}
374
375size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
376  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
377  return kArmWordSize;
378}
379
380size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
381  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
382  return kArmWordSize;
383}
384
// Spills one S register to the given SP-relative slot; returns the number
// of bytes consumed.
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
389
// Reloads one S register from the given SP-relative slot; returns the
// number of bytes consumed.
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
394
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    // Callee-save masks are derived from the file-level
                    // kCoreCalleeSaves/kFpuCalleeSaves arrays above.
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features) {
  // Save the PC register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(PC));
}
416
// Finalizes the generated code. FinalizeCode() may move instructions
// (branch fixups, literal pool emission), so every previously recorded
// native pc — stack maps, block labels, disassembly intervals — must be
// remapped through GetAdjustedPosition().
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust native pc offsets of block labels.
  for (size_t block_idx = 0u, end = block_order_->Size(); block_idx != end; ++block_idx) {
    HBasicBlock* block = block_order_->Get(block_idx);
    // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
    // FirstNonEmptyBlock() which could lead to adjusting a label more than once.
    DCHECK_LT(static_cast<size_t>(block->GetBlockId()), block_labels_.Size());
    Label* block_label = &block_labels_.GetRawStorage()[block->GetBlockId()];
    DCHECK_EQ(block_label->IsBound(), !block->IsSingleJump());
    if (block_label->IsBound()) {
      __ AdjustLabelPosition(block_label);
    }
  }
  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}
456
// Picks a free register (or register pair) for the given type and marks it
// blocked. Core registers and register pairs alias, so blocking one must
// also update the other's bookkeeping.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      // Block both halves individually, then refresh the pair table so any
      // other pair sharing a half becomes blocked too.
      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index.
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
507
508void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
509  // Don't allocate the dalvik style register pair passing.
510  blocked_register_pairs_[R1_R2] = true;
511
512  // Stack register, LR and PC are always reserved.
513  blocked_core_registers_[SP] = true;
514  blocked_core_registers_[LR] = true;
515  blocked_core_registers_[PC] = true;
516
517  // Reserve thread register.
518  blocked_core_registers_[TR] = true;
519
520  // Reserve temp register.
521  blocked_core_registers_[IP] = true;
522
523  if (is_baseline) {
524    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
525      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
526    }
527
528    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
529
530    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
531      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
532    }
533  }
534
535  UpdateBlockedPairRegisters();
536}
537
538void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
539  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
540    ArmManagedRegister current =
541        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
542    if (blocked_core_registers_[current.AsRegisterPairLow()]
543        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
544      blocked_register_pairs_[i] = true;
545    }
546  }
547}
548
// Visitor that emits machine code for each HIR instruction, sharing the
// code generator's assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
553
// Computes the core and FPU spill masks from the registers the allocator
// actually used, intersected with the callee-save sets.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest set bits so the mask
    // describes one contiguous vpush/vpop range.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
574
// Maps a core register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
578
// Maps an S register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
582
// Emits the method prologue: optional stack-overflow probe, callee-save
// pushes (with matching CFI), frame allocation, and the store of the
// current ArtMethod* at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit stack overflow check: probe below the reserved region; an
    // overflow faults on this load. Record the pc so the fault can be
    // attributed to this method.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // PC is in the list of callee-save to mimic Quick, but we need to push
  // LR at entry instead.
  uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
  __ PushList(push_mask);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), so a single
    // vpush covers all saved S registers.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame and store the current method at SP + 0.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
616
// Emits the method epilogue: frame deallocation and callee-save pops (with
// matching CFI). Popping `core_spill_mask_` — which contains PC (see
// kCoreCalleeSaves) — restores registers and returns in one instruction.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  // The CFI state is restored after the epilogue so fall-through code (if
  // any) keeps the frame description.
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  __ PopList(core_spill_mask_);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
636
637void CodeGeneratorARM::Bind(HBasicBlock* block) {
638  __ Bind(GetLabelOf(block));
639}
640
641Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
642  switch (load->GetType()) {
643    case Primitive::kPrimLong:
644    case Primitive::kPrimDouble:
645      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
646
647    case Primitive::kPrimInt:
648    case Primitive::kPrimNot:
649    case Primitive::kPrimFloat:
650      return Location::StackSlot(GetStackSlot(load->GetLocal()));
651
652    case Primitive::kPrimBoolean:
653    case Primitive::kPrimByte:
654    case Primitive::kPrimChar:
655    case Primitive::kPrimShort:
656    case Primitive::kPrimVoid:
657      LOG(FATAL) << "Unexpected type " << load->GetType();
658      UNREACHABLE();
659  }
660
661  LOG(FATAL) << "Unreachable";
662  UNREACHABLE();
663}
664
// Assigns the next argument location for the dex calling convention.
// Stateful: advances the core (`gp_index_`), FP (`float_index_`,
// `double_index_`) and stack (`stack_index_`) cursors as arguments are
// consumed; overflow arguments go to stack slots.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs take two consecutive core registers; the pair must not start
      // at R1 (see ExpectedPairLayout: low register must be even).
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      // Re-check: the skip above may have pushed the pair out of registers.
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));
        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // When the float cursor is even (no single-register gap left by an
      // earlier double), keep it at or beyond the double cursor.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles start at an even S register at or beyond the float cursor.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
739
740Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
741  switch (type) {
742    case Primitive::kPrimBoolean:
743    case Primitive::kPrimByte:
744    case Primitive::kPrimChar:
745    case Primitive::kPrimShort:
746    case Primitive::kPrimInt:
747    case Primitive::kPrimNot: {
748      return Location::RegisterLocation(R0);
749    }
750
751    case Primitive::kPrimFloat: {
752      return Location::FpuRegisterLocation(S0);
753    }
754
755    case Primitive::kPrimLong: {
756      return Location::RegisterPairLocation(R0, R1);
757    }
758
759    case Primitive::kPrimDouble: {
760      return Location::FpuRegisterPairLocation(S0, S1);
761    }
762
763    case Primitive::kPrimVoid:
764      return Location();
765  }
766
767  UNREACHABLE();
768}
769
// The ArtMethod* is always passed in R0 (kMethodRegisterArgument).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
773
// Moves a 32-bit value between any combination of core register, S
// register, and stack slot. A stack-to-stack move goes through IP.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      // Stack-to-stack: bounce through the IP scratch register.
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
807
// Moves a 64-bit value between any two supported locations: core register
// pair, FPU register pair, or double stack slot. FPU-pair <-> core-pair
// moves are not supported (UNIMPLEMENTED below).
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Pair-to-pair: let the parallel move resolver handle any overlap
      // between the source and destination registers.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // A word-pair load requires an even-aligned, consecutive register
      // pair (see ExpectedPairLayout).
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // (R1, R2) presumably cannot be used for a word-pair store
        // (odd-numbered base register) — store the words separately.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack 64-bit move, done as two 32-bit parallel moves.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
865
// Materializes the value produced by `instruction` into `location`, on
// behalf of `move_for`. Handles the current method, constants, locals,
// temporaries, and the plain output of the previous instruction.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current ArtMethod* lives at a fixed stack offset.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // Value is already where it needs to be.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Load the constant directly into the target register(s)/slot(s).
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store the two halves through IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Locals live in stack slots; pick 32- vs 64-bit move by type.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // Fall back to moving the instruction's own output location; this is
    // only valid right after the instruction (or its temporaries).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
949
// Calls a quick runtime entry point: loads its address from the Thread
// register (TR) at `entry_point_offset`, branches to it via LR, and then
// records PC info for `dex_pc` so stack maps cover the call.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
  // Only instruction kinds that are expected to call into the runtime
  // should get here.
  DCHECK(instruction->IsSuspendCheck()
      || instruction->IsBoundsCheck()
      || instruction->IsNullCheck()
      || instruction->IsDivZeroCheck()
      || instruction->GetLocations()->CanCall()
      || !IsLeafMethod());
}
964
// Emits the control flow for an unconditional jump (HGoto / HTryBoundary)
// from `got`'s block to `successor`, folding in suspend checks where the
// graph provides them.
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  // On a loop back edge with a suspend check, emit the check (which also
  // performs the jump to `successor`) instead of a plain branch.
  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // A suspend check placed just before the goto in the entry block is
  // emitted here, without a successor (no jump folded in).
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // Elide the branch when the successor is next in code order.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
985
// HGoto consumes no values and produces none, so it needs no locations.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}
989
// Delegates to the common unconditional-jump handling.
void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}
993
// HTryBoundary consumes no values and produces none, so it needs no
// locations.
void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}
997
998void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
999  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
1000  if (!successor->IsExitBlock()) {
1001    HandleGoto(try_boundary, successor);
1002  }
1003}
1004
// HExit consumes no values and produces none, so it needs no locations.
void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}
1008
// The exit block generates no code.
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}
1012
// Emits a conditional branch for `instruction` (HIf or HDeoptimize):
// branches to `true_target` when the condition holds, falls through or
// branches to `false_target` otherwise. `always_true_target` (may be null)
// is the destination used when the condition is a compile-time constant 1.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
      // Fall through to the unconditional false branch below.
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ cmp(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
             ShifterOperand(0));
      __ b(true_target, NE);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      if (locations->InAt(1).IsRegister()) {
        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
      } else {
        DCHECK(locations->InAt(1).IsConstant());
        HConstant* constant = locations->InAt(1).GetConstant();
        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
        ShifterOperand operand;
        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
          __ cmp(left, operand);
        } else {
          // Constant does not fit a CMP immediate; materialize it in IP.
          Register temp = IP;
          __ LoadImmediate(temp, value);
          __ cmp(left, ShifterOperand(temp));
        }
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  // A null false_target means the false successor is the next block.
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
1064
1065void LocationsBuilderARM::VisitIf(HIf* if_instr) {
1066  LocationSummary* locations =
1067      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
1068  HInstruction* cond = if_instr->InputAt(0);
1069  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1070    locations->SetInAt(0, Location::RequiresRegister());
1071  }
1072}
1073
1074void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1075  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1076  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1077  Label* always_true_target = true_target;
1078  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1079                                if_instr->IfTrueSuccessor())) {
1080    always_true_target = nullptr;
1081  }
1082  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1083                                if_instr->IfFalseSuccessor())) {
1084    false_target = nullptr;
1085  }
1086  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1087}
1088
1089void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1090  LocationSummary* locations = new (GetGraph()->GetArena())
1091      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1092  HInstruction* cond = deoptimize->InputAt(0);
1093  DCHECK(cond->IsCondition());
1094  if (cond->AsCondition()->NeedsMaterialization()) {
1095    locations->SetInAt(0, Location::RequiresRegister());
1096  }
1097}
1098
1099void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1100  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
1101      DeoptimizationSlowPathARM(deoptimize);
1102  codegen_->AddSlowPath(slow_path);
1103  Label* slow_path_entry = slow_path->GetEntryLabel();
1104  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1105}
1106
1107void LocationsBuilderARM::VisitCondition(HCondition* cond) {
1108  LocationSummary* locations =
1109      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
1110  locations->SetInAt(0, Location::RequiresRegister());
1111  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
1112  if (cond->NeedsMaterialization()) {
1113    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1114  }
1115}
1116
// Materializes a condition into its output register as 1 (true) or 0
// (false), using a compare followed by an IT-else block with two
// conditional moves. Nothing is emitted for non-materialized conditions.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  if (!cond->NeedsMaterialization()) return;
  LocationSummary* locations = cond->GetLocations();
  Register left = locations->InAt(0).AsRegister<Register>();

  if (locations->InAt(1).IsRegister()) {
    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
  } else {
    DCHECK(locations->InAt(1).IsConstant());
    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
    ShifterOperand operand;
    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
      __ cmp(left, operand);
    } else {
      // Constant does not fit a CMP immediate; materialize it in IP.
      Register temp = IP;
      __ LoadImmediate(temp, value);
      __ cmp(left, ShifterOperand(temp));
    }
  }
  // IT-else block: out = 1 on the condition, out = 0 on its opposite.
  __ it(ARMCondition(cond->GetCondition()), kItElse);
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
         ARMCondition(cond->GetCondition()));
  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
         ARMOppositeCondition(cond->GetCondition()));
}
1142
// Handled by the generic HCondition locations.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}
1146
// Handled by the generic HCondition code generation.
void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}
1150
// Handled by the generic HCondition locations.
void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}
1154
// Handled by the generic HCondition code generation.
void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}
1158
// Handled by the generic HCondition locations.
void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}
1162
// Handled by the generic HCondition code generation.
void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}
1166
// Handled by the generic HCondition locations.
void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}
1170
// Handled by the generic HCondition code generation.
void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}
1174
// Handled by the generic HCondition locations.
void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}
1178
// Handled by the generic HCondition code generation.
void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}
1182
// Handled by the generic HCondition locations.
void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1186
// Handled by the generic HCondition code generation.
void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1190
// HLocal produces no value itself, so it needs no locations.
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}
1194
// No code is generated for locals; they are only expected in the entry
// block.
void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
1198
// HLoadLocal needs no locations; its value is moved at the use site.
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}
1202
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1207
1208void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1209  LocationSummary* locations =
1210      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1211  switch (store->InputAt(1)->GetType()) {
1212    case Primitive::kPrimBoolean:
1213    case Primitive::kPrimByte:
1214    case Primitive::kPrimChar:
1215    case Primitive::kPrimShort:
1216    case Primitive::kPrimInt:
1217    case Primitive::kPrimNot:
1218    case Primitive::kPrimFloat:
1219      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1220      break;
1221
1222    case Primitive::kPrimLong:
1223    case Primitive::kPrimDouble:
1224      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1225      break;
1226
1227    default:
1228      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1229  }
1230}
1231
// Nothing to emit: the locations builder pins the stored value to the
// local's stack slot.
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  UNUSED(store);
}
1235
// Constants live in a constant location; no register is allocated.
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1241
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1246
// Constants live in a constant location; no register is allocated.
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1252
void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1257
// Constants live in a constant location; no register is allocated.
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1263
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1268
// Constants live in a constant location; no register is allocated.
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1274
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1279
// Constants live in a constant location; no register is allocated.
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}
1285
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1290
// A memory barrier consumes and produces no values.
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}
1294
// Emits the barrier of the requested kind.
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1298
// A void return consumes and produces no values.
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}
1302
// Tears down the frame and returns to the caller.
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1307
// The returned value must be in the calling convention's return location
// for its type.
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
1313
// Tears down the frame and returns; the value is already in the return
// location enforced by the locations builder.
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1318
// Sets up locations for a static or direct call, preferring an intrinsic
// implementation when one is available.
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                         codegen_->GetInstructionSetFeatures());
  if (intrinsic.TryDispatch(invoke)) {
    // The intrinsic builder set up the locations itself.
    return;
  }

  HandleInvoke(invoke);
}
1332
1333static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1334  if (invoke->GetLocations()->Intrinsified()) {
1335    IntrinsicCodeGeneratorARM intrinsic(codegen);
1336    intrinsic.Dispatch(invoke);
1337    return true;
1338  }
1339  return false;
1340}
1341
// Generates a static or direct call, using the intrinsic implementation
// when the locations were intrinsified.
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(
      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1356
// Sets up the common calling-convention locations shared by all invoke
// kinds.
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
1361
1362void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1363  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1364                                         codegen_->GetInstructionSetFeatures());
1365  if (intrinsic.TryDispatch(invoke)) {
1366    return;
1367  }
1368
1369  HandleInvoke(invoke);
1370}
1371
// Generates a virtual call through the receiver's embedded vtable, unless
// an intrinsic implementation applies.
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  // The class load above doubles as an implicit null check of the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1398
// Interface calls use the common invoke locations plus R12 as a temp for
// the hidden argument.
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1404
// Generates an interface call through the receiver's embedded IMT,
// passing the dex method index as a hidden argument in the second temp.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as an implicit null check of the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1437
// Sets up locations for arithmetic negation of int, long, float or double.
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // The long-neg code generator writes the output's low and high words
      // before it finishes reading the input's high word, so the output
      // must not share registers with the input.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1463
// Generates arithmetic negation. Int uses RSB; long uses an RSBS/SBC/SUB
// sequence (RSC is unavailable in Thumb-2); float/double use VNEG.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      // out = 0 - in
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1510
1511void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1512  Primitive::Type result_type = conversion->GetResultType();
1513  Primitive::Type input_type = conversion->GetInputType();
1514  DCHECK_NE(result_type, input_type);
1515
1516  // The float-to-long, double-to-long and long-to-float type conversions
1517  // rely on a call to the runtime.
1518  LocationSummary::CallKind call_kind =
1519      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1520        && result_type == Primitive::kPrimLong)
1521       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
1522      ? LocationSummary::kCall
1523      : LocationSummary::kNoCall;
1524  LocationSummary* locations =
1525      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1526
1527  // The Java language does not allow treating boolean as an integral type but
1528  // our bit representation makes it safe.
1529
1530  switch (result_type) {
1531    case Primitive::kPrimByte:
1532      switch (input_type) {
1533        case Primitive::kPrimBoolean:
1534          // Boolean input is a result of code transformations.
1535        case Primitive::kPrimShort:
1536        case Primitive::kPrimInt:
1537        case Primitive::kPrimChar:
1538          // Processing a Dex `int-to-byte' instruction.
1539          locations->SetInAt(0, Location::RequiresRegister());
1540          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1541          break;
1542
1543        default:
1544          LOG(FATAL) << "Unexpected type conversion from " << input_type
1545                     << " to " << result_type;
1546      }
1547      break;
1548
1549    case Primitive::kPrimShort:
1550      switch (input_type) {
1551        case Primitive::kPrimBoolean:
1552          // Boolean input is a result of code transformations.
1553        case Primitive::kPrimByte:
1554        case Primitive::kPrimInt:
1555        case Primitive::kPrimChar:
1556          // Processing a Dex `int-to-short' instruction.
1557          locations->SetInAt(0, Location::RequiresRegister());
1558          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1559          break;
1560
1561        default:
1562          LOG(FATAL) << "Unexpected type conversion from " << input_type
1563                     << " to " << result_type;
1564      }
1565      break;
1566
1567    case Primitive::kPrimInt:
1568      switch (input_type) {
1569        case Primitive::kPrimLong:
1570          // Processing a Dex `long-to-int' instruction.
1571          locations->SetInAt(0, Location::Any());
1572          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1573          break;
1574
1575        case Primitive::kPrimFloat:
1576          // Processing a Dex `float-to-int' instruction.
1577          locations->SetInAt(0, Location::RequiresFpuRegister());
1578          locations->SetOut(Location::RequiresRegister());
1579          locations->AddTemp(Location::RequiresFpuRegister());
1580          break;
1581
1582        case Primitive::kPrimDouble:
1583          // Processing a Dex `double-to-int' instruction.
1584          locations->SetInAt(0, Location::RequiresFpuRegister());
1585          locations->SetOut(Location::RequiresRegister());
1586          locations->AddTemp(Location::RequiresFpuRegister());
1587          break;
1588
1589        default:
1590          LOG(FATAL) << "Unexpected type conversion from " << input_type
1591                     << " to " << result_type;
1592      }
1593      break;
1594
1595    case Primitive::kPrimLong:
1596      switch (input_type) {
1597        case Primitive::kPrimBoolean:
1598          // Boolean input is a result of code transformations.
1599        case Primitive::kPrimByte:
1600        case Primitive::kPrimShort:
1601        case Primitive::kPrimInt:
1602        case Primitive::kPrimChar:
1603          // Processing a Dex `int-to-long' instruction.
1604          locations->SetInAt(0, Location::RequiresRegister());
1605          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1606          break;
1607
1608        case Primitive::kPrimFloat: {
1609          // Processing a Dex `float-to-long' instruction.
1610          InvokeRuntimeCallingConvention calling_convention;
1611          locations->SetInAt(0, Location::FpuRegisterLocation(
1612              calling_convention.GetFpuRegisterAt(0)));
1613          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1614          break;
1615        }
1616
1617        case Primitive::kPrimDouble: {
1618          // Processing a Dex `double-to-long' instruction.
1619          InvokeRuntimeCallingConvention calling_convention;
1620          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1621              calling_convention.GetFpuRegisterAt(0),
1622              calling_convention.GetFpuRegisterAt(1)));
1623          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1624          break;
1625        }
1626
1627        default:
1628          LOG(FATAL) << "Unexpected type conversion from " << input_type
1629                     << " to " << result_type;
1630      }
1631      break;
1632
1633    case Primitive::kPrimChar:
1634      switch (input_type) {
1635        case Primitive::kPrimBoolean:
1636          // Boolean input is a result of code transformations.
1637        case Primitive::kPrimByte:
1638        case Primitive::kPrimShort:
1639        case Primitive::kPrimInt:
1640          // Processing a Dex `int-to-char' instruction.
1641          locations->SetInAt(0, Location::RequiresRegister());
1642          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1643          break;
1644
1645        default:
1646          LOG(FATAL) << "Unexpected type conversion from " << input_type
1647                     << " to " << result_type;
1648      }
1649      break;
1650
1651    case Primitive::kPrimFloat:
1652      switch (input_type) {
1653        case Primitive::kPrimBoolean:
1654          // Boolean input is a result of code transformations.
1655        case Primitive::kPrimByte:
1656        case Primitive::kPrimShort:
1657        case Primitive::kPrimInt:
1658        case Primitive::kPrimChar:
1659          // Processing a Dex `int-to-float' instruction.
1660          locations->SetInAt(0, Location::RequiresRegister());
1661          locations->SetOut(Location::RequiresFpuRegister());
1662          break;
1663
1664        case Primitive::kPrimLong: {
1665          // Processing a Dex `long-to-float' instruction.
1666          InvokeRuntimeCallingConvention calling_convention;
1667          locations->SetInAt(0, Location::RegisterPairLocation(
1668              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
1669          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1670          break;
1671        }
1672
1673        case Primitive::kPrimDouble:
1674          // Processing a Dex `double-to-float' instruction.
1675          locations->SetInAt(0, Location::RequiresFpuRegister());
1676          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1677          break;
1678
1679        default:
1680          LOG(FATAL) << "Unexpected type conversion from " << input_type
1681                     << " to " << result_type;
1682      };
1683      break;
1684
1685    case Primitive::kPrimDouble:
1686      switch (input_type) {
1687        case Primitive::kPrimBoolean:
1688          // Boolean input is a result of code transformations.
1689        case Primitive::kPrimByte:
1690        case Primitive::kPrimShort:
1691        case Primitive::kPrimInt:
1692        case Primitive::kPrimChar:
1693          // Processing a Dex `int-to-double' instruction.
1694          locations->SetInAt(0, Location::RequiresRegister());
1695          locations->SetOut(Location::RequiresFpuRegister());
1696          break;
1697
1698        case Primitive::kPrimLong:
1699          // Processing a Dex `long-to-double' instruction.
1700          locations->SetInAt(0, Location::RequiresRegister());
1701          locations->SetOut(Location::RequiresFpuRegister());
1702          locations->AddTemp(Location::RequiresFpuRegister());
1703          locations->AddTemp(Location::RequiresFpuRegister());
1704          break;
1705
1706        case Primitive::kPrimFloat:
1707          // Processing a Dex `float-to-double' instruction.
1708          locations->SetInAt(0, Location::RequiresFpuRegister());
1709          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1710          break;
1711
1712        default:
1713          LOG(FATAL) << "Unexpected type conversion from " << input_type
1714                     << " to " << result_type;
1715      };
1716      break;
1717
1718    default:
1719      LOG(FATAL) << "Unexpected type conversion from " << input_type
1720                 << " to " << result_type;
1721  }
1722}
1723
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  // Emits the ARM code for a primitive type conversion. Operand/result
  // constraints (and the runtime-call register layout for the
  // float/double <-> long cases) were set up by the matching
  // LocationsBuilderARM::VisitTypeConversion.
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // sbfx sign-extends bits [0, 8) of the input into `out`.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // sbfx sign-extends bits [0, 16) of the input into `out`.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // The input was allocated as Location::Any(), so it may be a
          // register pair, a double stack slot or a long constant; in every
          // case the result is the low 32 bits of the long value.
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Convert in an FPU temp, then move the result to the core register.
          // NOTE(review): relies on VCVT-to-signed rounding toward zero and
          // saturating (per the ARM ARM) to match Java narrowing semantics.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          // Same pattern as float-to-int, with a D register temp.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // No inline sequence: call the pF2l entrypoint (the locations
          // builder fixed the argument/result registers).
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // ubfx zero-extends the low 16 bits (char is unsigned).
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          // Move to the FPU register, then convert in place.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // No single instruction exists; combine the two halves as
          // result = (double)high * 2^32 + unsigned(low).
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
          DRegister constant_d = FromLowSToD(constant_s);

          // temp_d = int-to-double(high)
          __ vmovsr(temp_s, high);
          __ vcvtdi(temp_d, temp_s);
          // constant_d = k2Pow32EncodingForDouble
          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
          // out_d = unsigned-to-double(low)
          __ vmovsr(out_s, low);
          __ vcvtdu(out_d, out_s);
          // out_d += temp_d * constant_d
          __ vmlad(out_d, temp_d, constant_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
1955
1956void LocationsBuilderARM::VisitAdd(HAdd* add) {
1957  LocationSummary* locations =
1958      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
1959  switch (add->GetResultType()) {
1960    case Primitive::kPrimInt: {
1961      locations->SetInAt(0, Location::RequiresRegister());
1962      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
1963      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1964      break;
1965    }
1966
1967    case Primitive::kPrimLong: {
1968      locations->SetInAt(0, Location::RequiresRegister());
1969      locations->SetInAt(1, Location::RequiresRegister());
1970      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1971      break;
1972    }
1973
1974    case Primitive::kPrimFloat:
1975    case Primitive::kPrimDouble: {
1976      locations->SetInAt(0, Location::RequiresFpuRegister());
1977      locations->SetInAt(1, Location::RequiresFpuRegister());
1978      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1979      break;
1980    }
1981
1982    default:
1983      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
1984  }
1985}
1986
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
  // Emits the ARM code for an addition; operand constraints were set up by
  // LocationsBuilderARM::VisitAdd.
  LocationSummary* locations = add->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt:
      if (second.IsRegister()) {
        __ add(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Constant right-hand side.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // 64-bit add: `adds` sets the carry flag, `adc` consumes it.
      __ adds(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ adc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat:
      __ vadds(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      // Doubles live in S register pairs; FromLowSToD maps the pair to the
      // overlapping D register.
      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}
2032
2033void LocationsBuilderARM::VisitSub(HSub* sub) {
2034  LocationSummary* locations =
2035      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2036  switch (sub->GetResultType()) {
2037    case Primitive::kPrimInt: {
2038      locations->SetInAt(0, Location::RequiresRegister());
2039      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2040      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2041      break;
2042    }
2043
2044    case Primitive::kPrimLong: {
2045      locations->SetInAt(0, Location::RequiresRegister());
2046      locations->SetInAt(1, Location::RequiresRegister());
2047      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2048      break;
2049    }
2050    case Primitive::kPrimFloat:
2051    case Primitive::kPrimDouble: {
2052      locations->SetInAt(0, Location::RequiresFpuRegister());
2053      locations->SetInAt(1, Location::RequiresFpuRegister());
2054      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2055      break;
2056    }
2057    default:
2058      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2059  }
2060}
2061
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
  // Emits the ARM code for a subtraction; operand constraints were set up by
  // LocationsBuilderARM::VisitSub.
  LocationSummary* locations = sub->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ sub(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Constant right-hand side: subtract by adding the negated value.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       -second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;
    }

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // 64-bit subtract: `subs` sets the borrow (carry) flag, `sbc` consumes it.
      __ subs(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ sbc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vsubs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Doubles live in S register pairs; FromLowSToD maps the pair to the
      // overlapping D register.
      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }


    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}
2111
2112void LocationsBuilderARM::VisitMul(HMul* mul) {
2113  LocationSummary* locations =
2114      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2115  switch (mul->GetResultType()) {
2116    case Primitive::kPrimInt:
2117    case Primitive::kPrimLong:  {
2118      locations->SetInAt(0, Location::RequiresRegister());
2119      locations->SetInAt(1, Location::RequiresRegister());
2120      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2121      break;
2122    }
2123
2124    case Primitive::kPrimFloat:
2125    case Primitive::kPrimDouble: {
2126      locations->SetInAt(0, Location::RequiresFpuRegister());
2127      locations->SetInAt(1, Location::RequiresFpuRegister());
2128      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2129      break;
2130    }
2131
2132    default:
2133      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2134  }
2135}
2136
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  // Emits the ARM code for a multiplication; operand constraints were set up
  // by LocationsBuilderARM::VisitMul.
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks needed because the allocator can hand out the R1_R2
      // pair: out.hi is written (by mla below) before in1.lo and in2.lo are
      // read (by umull), so it must not alias either of them
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Doubles live in S register pairs; FromLowSToD maps the pair to the
      // overlapping D register.
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2198
2199void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2200  DCHECK(instruction->IsDiv() || instruction->IsRem());
2201  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2202
2203  LocationSummary* locations = instruction->GetLocations();
2204  Location second = locations->InAt(1);
2205  DCHECK(second.IsConstant());
2206
2207  Register out = locations->Out().AsRegister<Register>();
2208  Register dividend = locations->InAt(0).AsRegister<Register>();
2209  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2210  DCHECK(imm == 1 || imm == -1);
2211
2212  if (instruction->IsRem()) {
2213    __ LoadImmediate(out, 0);
2214  } else {
2215    if (imm == 1) {
2216      __ Mov(out, dividend);
2217    } else {
2218      __ rsb(out, dividend, ShifterOperand(0));
2219    }
2220  }
2221}
2222
2223void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2224  DCHECK(instruction->IsDiv() || instruction->IsRem());
2225  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2226
2227  LocationSummary* locations = instruction->GetLocations();
2228  Location second = locations->InAt(1);
2229  DCHECK(second.IsConstant());
2230
2231  Register out = locations->Out().AsRegister<Register>();
2232  Register dividend = locations->InAt(0).AsRegister<Register>();
2233  Register temp = locations->GetTemp(0).AsRegister<Register>();
2234  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2235  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
2236  DCHECK(IsPowerOfTwo(abs_imm));
2237  int ctz_imm = CTZ(abs_imm);
2238
2239  if (ctz_imm == 1) {
2240    __ Lsr(temp, dividend, 32 - ctz_imm);
2241  } else {
2242    __ Asr(temp, dividend, 31);
2243    __ Lsr(temp, temp, 32 - ctz_imm);
2244  }
2245  __ add(out, temp, ShifterOperand(dividend));
2246
2247  if (instruction->IsDiv()) {
2248    __ Asr(out, out, ctz_imm);
2249    if (imm < 0) {
2250      __ rsb(out, out, ShifterOperand(0));
2251    }
2252  } else {
2253    __ ubfx(out, out, 0, ctz_imm);
2254    __ sub(out, out, ShifterOperand(temp));
2255  }
2256}
2257
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  // Emits code for an integer div/rem by an arbitrary non-zero constant,
  // replacing the divide with a multiplication by a precomputed "magic"
  // reciprocal (the constants come from CalculateMagicAndShiftForDivRem).
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 = high 32 bits of dividend * magic; temp2 receives the unused low word.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct the high word when the sign of the 32-bit magic constant does
  // not match the sign of the divisor.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // quotient = temp1 - (temp1 >> 31), i.e. add 1 when temp1 is negative.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // remainder = dividend - quotient * divisor.
    // TODO: Strength reduction for mls.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2298
2299void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2300  DCHECK(instruction->IsDiv() || instruction->IsRem());
2301  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2302
2303  LocationSummary* locations = instruction->GetLocations();
2304  Location second = locations->InAt(1);
2305  DCHECK(second.IsConstant());
2306
2307  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2308  if (imm == 0) {
2309    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2310  } else if (imm == 1 || imm == -1) {
2311    DivRemOneOrMinusOne(instruction);
2312  } else if (IsPowerOfTwo(std::abs(imm))) {
2313    DivRemByPowerOfTwo(instruction);
2314  } else {
2315    DCHECK(imm <= -2 || imm >= 2);
2316    GenerateDivRemWithAnyConstant(instruction);
2317  }
2318}
2319
2320void LocationsBuilderARM::VisitDiv(HDiv* div) {
2321  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2322  if (div->GetResultType() == Primitive::kPrimLong) {
2323    // pLdiv runtime call.
2324    call_kind = LocationSummary::kCall;
2325  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2326    // sdiv will be replaced by other instruction sequence.
2327  } else if (div->GetResultType() == Primitive::kPrimInt &&
2328             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2329    // pIdivmod runtime call.
2330    call_kind = LocationSummary::kCall;
2331  }
2332
2333  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2334
2335  switch (div->GetResultType()) {
2336    case Primitive::kPrimInt: {
2337      if (div->InputAt(1)->IsConstant()) {
2338        locations->SetInAt(0, Location::RequiresRegister());
2339        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2340        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2341        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2342        if (abs_imm <= 1) {
2343          // No temp register required.
2344        } else {
2345          locations->AddTemp(Location::RequiresRegister());
2346          if (!IsPowerOfTwo(abs_imm)) {
2347            locations->AddTemp(Location::RequiresRegister());
2348          }
2349        }
2350      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2351        locations->SetInAt(0, Location::RequiresRegister());
2352        locations->SetInAt(1, Location::RequiresRegister());
2353        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2354      } else {
2355        InvokeRuntimeCallingConvention calling_convention;
2356        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2357        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2358        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2359        //       we only need the former.
2360        locations->SetOut(Location::RegisterLocation(R0));
2361      }
2362      break;
2363    }
2364    case Primitive::kPrimLong: {
2365      InvokeRuntimeCallingConvention calling_convention;
2366      locations->SetInAt(0, Location::RegisterPairLocation(
2367          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2368      locations->SetInAt(1, Location::RegisterPairLocation(
2369          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2370      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2371      break;
2372    }
2373    case Primitive::kPrimFloat:
2374    case Primitive::kPrimDouble: {
2375      locations->SetInAt(0, Location::RequiresFpuRegister());
2376      locations->SetInAt(1, Location::RequiresFpuRegister());
2377      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2378      break;
2379    }
2380
2381    default:
2382      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2383  }
2384}
2385
// Emits code for an HDiv. Int division uses, in order of preference: the
// constant-divisor sequence, the hardware sdiv instruction, or the pIdivmod
// runtime helper. Long division always calls pLdiv; float/double use VFP.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        // Constant divisor: emit the optimized constant div/rem sequence.
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        // Hardware divide available: a single sdiv suffices.
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // No hardware divide: call the pIdivmod runtime helper. The locations
        // builder pinned the inputs and output to the calling convention, so
        // only sanity-check them here.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        // pIdivmod returns the quotient in R0 (the remainder in R1 is unused here).
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      // Long division always goes through the runtime; inputs/output were
      // pinned to the convention registers by the locations builder.
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      // Single-precision VFP divide.
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Double-precision VFP divide; pairs are addressed via their low S register.
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2442
2443void LocationsBuilderARM::VisitRem(HRem* rem) {
2444  Primitive::Type type = rem->GetResultType();
2445
2446  // Most remainders are implemented in the runtime.
2447  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2448  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2449    // sdiv will be replaced by other instruction sequence.
2450    call_kind = LocationSummary::kNoCall;
2451  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2452             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2453    // Have hardware divide instruction for int, do it with three instructions.
2454    call_kind = LocationSummary::kNoCall;
2455  }
2456
2457  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2458
2459  switch (type) {
2460    case Primitive::kPrimInt: {
2461      if (rem->InputAt(1)->IsConstant()) {
2462        locations->SetInAt(0, Location::RequiresRegister());
2463        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2464        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2465        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2466        if (abs_imm <= 1) {
2467          // No temp register required.
2468        } else {
2469          locations->AddTemp(Location::RequiresRegister());
2470          if (!IsPowerOfTwo(abs_imm)) {
2471            locations->AddTemp(Location::RequiresRegister());
2472          }
2473        }
2474      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2475        locations->SetInAt(0, Location::RequiresRegister());
2476        locations->SetInAt(1, Location::RequiresRegister());
2477        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2478        locations->AddTemp(Location::RequiresRegister());
2479      } else {
2480        InvokeRuntimeCallingConvention calling_convention;
2481        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2482        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2483        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2484        //       we only need the latter.
2485        locations->SetOut(Location::RegisterLocation(R1));
2486      }
2487      break;
2488    }
2489    case Primitive::kPrimLong: {
2490      InvokeRuntimeCallingConvention calling_convention;
2491      locations->SetInAt(0, Location::RegisterPairLocation(
2492          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2493      locations->SetInAt(1, Location::RegisterPairLocation(
2494          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2495      // The runtime helper puts the output in R2,R3.
2496      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2497      break;
2498    }
2499    case Primitive::kPrimFloat: {
2500      InvokeRuntimeCallingConvention calling_convention;
2501      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2502      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2503      locations->SetOut(Location::FpuRegisterLocation(S0));
2504      break;
2505    }
2506
2507    case Primitive::kPrimDouble: {
2508      InvokeRuntimeCallingConvention calling_convention;
2509      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2510          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2511      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2512          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2513      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2514      break;
2515    }
2516
2517    default:
2518      LOG(FATAL) << "Unexpected rem type " << type;
2519  }
2520}
2521
2522void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2523  LocationSummary* locations = rem->GetLocations();
2524  Location out = locations->Out();
2525  Location first = locations->InAt(0);
2526  Location second = locations->InAt(1);
2527
2528  Primitive::Type type = rem->GetResultType();
2529  switch (type) {
2530    case Primitive::kPrimInt: {
2531        if (second.IsConstant()) {
2532          GenerateDivRemConstantIntegral(rem);
2533        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2534        Register reg1 = first.AsRegister<Register>();
2535        Register reg2 = second.AsRegister<Register>();
2536        Register temp = locations->GetTemp(0).AsRegister<Register>();
2537
2538        // temp = reg1 / reg2  (integer division)
2539        // temp = temp * reg2
2540        // dest = reg1 - temp
2541        __ sdiv(temp, reg1, reg2);
2542        __ mul(temp, temp, reg2);
2543        __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
2544      } else {
2545        InvokeRuntimeCallingConvention calling_convention;
2546        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2547        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2548        DCHECK_EQ(R1, out.AsRegister<Register>());
2549
2550        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2551      }
2552      break;
2553    }
2554
2555    case Primitive::kPrimLong: {
2556      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2557      break;
2558    }
2559
2560    case Primitive::kPrimFloat: {
2561      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2562      break;
2563    }
2564
2565    case Primitive::kPrimDouble: {
2566      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2567      break;
2568    }
2569
2570    default:
2571      LOG(FATAL) << "Unexpected rem type " << type;
2572  }
2573}
2574
2575void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2576  LocationSummary* locations =
2577      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2578  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2579  if (instruction->HasUses()) {
2580    locations->SetOut(Location::SameAsFirstInput());
2581  }
2582}
2583
2584void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2585  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2586  codegen_->AddSlowPath(slow_path);
2587
2588  LocationSummary* locations = instruction->GetLocations();
2589  Location value = locations->InAt(0);
2590
2591  switch (instruction->GetType()) {
2592    case Primitive::kPrimInt: {
2593      if (value.IsRegister()) {
2594        __ cmp(value.AsRegister<Register>(), ShifterOperand(0));
2595        __ b(slow_path->GetEntryLabel(), EQ);
2596      } else {
2597        DCHECK(value.IsConstant()) << value;
2598        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2599          __ b(slow_path->GetEntryLabel());
2600        }
2601      }
2602      break;
2603    }
2604    case Primitive::kPrimLong: {
2605      if (value.IsRegisterPair()) {
2606        __ orrs(IP,
2607                value.AsRegisterPairLow<Register>(),
2608                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2609        __ b(slow_path->GetEntryLabel(), EQ);
2610      } else {
2611        DCHECK(value.IsConstant()) << value;
2612        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2613          __ b(slow_path->GetEntryLabel());
2614        }
2615      }
2616      break;
2617    default:
2618      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2619    }
2620  }
2621}
2622
2623void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2624  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2625
2626  LocationSummary* locations =
2627      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2628
2629  switch (op->GetResultType()) {
2630    case Primitive::kPrimInt: {
2631      locations->SetInAt(0, Location::RequiresRegister());
2632      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2633      // Make the output overlap, as it will be used to hold the masked
2634      // second input.
2635      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2636      break;
2637    }
2638    case Primitive::kPrimLong: {
2639      locations->SetInAt(0, Location::RequiresRegister());
2640      locations->SetInAt(1, Location::RequiresRegister());
2641      locations->AddTemp(Location::RequiresRegister());
2642      locations->SetOut(Location::RequiresRegister());
2643      break;
2644    }
2645    default:
2646      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2647  }
2648}
2649
// Emits code shared by shl/shr/ushr. Int shifts mask the count (ARM does
// not) and pick Lsl/Asr/Lsr. Long shifts synthesize a 64-bit shift out of
// 32-bit operations, using a temp and a conditional (it/PL) instruction for
// counts >= 32. The exact instruction order below matters: the outputs of
// early steps feed later steps, and the subs result drives the it(PL).
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // out_reg holds the masked count; the locations builder made the
        // output overlap so this cannot clobber an input.
        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, out_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, out_reg);
        } else {
          __ Lsr(out_reg, first_reg, out_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // The masked count is kept in o_l until the final low-part shift.
        __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the high part
        __ Lsl(o_h, high, o_l);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsl(o_h, low, temp, false, PL);
        // Shift the low part
        __ Lsl(o_l, low, o_l);
      } else if (op->IsShr()) {
        // The masked count is kept in o_h until the final high-part shift.
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the low part
        __ Lsr(o_l, low, o_h);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Asr(o_l, high, temp, false, PL);
        // Shift the high part
        __ Asr(o_h, high, o_h);
      } else {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ Lsr(o_l, low, o_h);
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsr(o_l, high, temp, false, PL);
        __ Lsr(o_h, high, o_h);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2746
// Locations for shl are built by the shared shift helper.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2750
// Code for shl is emitted by the shared shift helper.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
2754
// Locations for shr are built by the shared shift helper.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2758
// Code for shr is emitted by the shared shift helper.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
2762
// Locations for ushr are built by the shared shift helper.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2766
// Code for ushr is emitted by the shared shift helper.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
2770
2771void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
2772  LocationSummary* locations =
2773      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2774  InvokeRuntimeCallingConvention calling_convention;
2775  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2776  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2777  locations->SetOut(Location::RegisterLocation(R0));
2778}
2779
2780void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
2781  InvokeRuntimeCallingConvention calling_convention;
2782  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
2783  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
2784                          instruction,
2785                          instruction->GetDexPc(),
2786                          nullptr);
2787}
2788
2789void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
2790  LocationSummary* locations =
2791      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2792  InvokeRuntimeCallingConvention calling_convention;
2793  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2794  locations->SetOut(Location::RegisterLocation(R0));
2795  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2796  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2797}
2798
2799void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
2800  InvokeRuntimeCallingConvention calling_convention;
2801  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
2802  codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
2803                          instruction,
2804                          instruction->GetDexPc(),
2805                          nullptr);
2806}
2807
2808void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
2809  LocationSummary* locations =
2810      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2811  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2812  if (location.IsStackSlot()) {
2813    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2814  } else if (location.IsDoubleStackSlot()) {
2815    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2816  }
2817  locations->SetOut(location);
2818}
2819
// No code is emitted for a parameter value.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
2824
2825void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
2826  LocationSummary* locations =
2827      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2828  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
2829}
2830
// No code is emitted for the current method.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
2834
2835void LocationsBuilderARM::VisitNot(HNot* not_) {
2836  LocationSummary* locations =
2837      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
2838  locations->SetInAt(0, Location::RequiresRegister());
2839  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2840}
2841
2842void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
2843  LocationSummary* locations = not_->GetLocations();
2844  Location out = locations->Out();
2845  Location in = locations->InAt(0);
2846  switch (not_->GetResultType()) {
2847    case Primitive::kPrimInt:
2848      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
2849      break;
2850
2851    case Primitive::kPrimLong:
2852      __ mvn(out.AsRegisterPairLow<Register>(),
2853             ShifterOperand(in.AsRegisterPairLow<Register>()));
2854      __ mvn(out.AsRegisterPairHigh<Register>(),
2855             ShifterOperand(in.AsRegisterPairHigh<Register>()));
2856      break;
2857
2858    default:
2859      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
2860  }
2861}
2862
2863void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
2864  LocationSummary* locations =
2865      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
2866  locations->SetInAt(0, Location::RequiresRegister());
2867  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2868}
2869
2870void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
2871  LocationSummary* locations = bool_not->GetLocations();
2872  Location out = locations->Out();
2873  Location in = locations->InAt(0);
2874  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
2875}
2876
2877void LocationsBuilderARM::VisitCompare(HCompare* compare) {
2878  LocationSummary* locations =
2879      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
2880  switch (compare->InputAt(0)->GetType()) {
2881    case Primitive::kPrimLong: {
2882      locations->SetInAt(0, Location::RequiresRegister());
2883      locations->SetInAt(1, Location::RequiresRegister());
2884      // Output overlaps because it is written before doing the low comparison.
2885      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2886      break;
2887    }
2888    case Primitive::kPrimFloat:
2889    case Primitive::kPrimDouble: {
2890      locations->SetInAt(0, Location::RequiresFpuRegister());
2891      locations->SetInAt(1, Location::RequiresFpuRegister());
2892      locations->SetOut(Location::RequiresRegister());
2893      break;
2894    }
2895    default:
2896      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
2897  }
2898}
2899
// Emits code for HCompare, producing -1, 0, or 1 in `out`. The per-type
// switch sets the condition flags; the shared tail after the switch turns
// those flags into the result, so the flag state must survive until then
// (hence LoadImmediate is placed before the final cmp/vcmp).
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // Decide on the high words first; only if they are equal does the
      // (unsigned) low-word comparison matter.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // Unordered (NaN) operands take the gt-bias or lt-bias result.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: map the flags left by the switch to -1 / 0 / 1.
  __ b(&done, EQ);
  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
2948
2949void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
2950  LocationSummary* locations =
2951      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2952  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2953    locations->SetInAt(i, Location::Any());
2954  }
2955  locations->SetOut(Location::Any());
2956}
2957
2958void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
2959  UNUSED(instruction);
2960  LOG(FATAL) << "Unreachable";
2961}
2962
2963void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
2964  // TODO (ported from quick): revisit Arm barrier kinds
2965  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
2966  switch (kind) {
2967    case MemBarrierKind::kAnyStore:
2968    case MemBarrierKind::kLoadAny:
2969    case MemBarrierKind::kAnyAny: {
2970      flavor = DmbOptions::ISH;
2971      break;
2972    }
2973    case MemBarrierKind::kStoreStore: {
2974      flavor = DmbOptions::ISHST;
2975      break;
2976    }
2977    default:
2978      LOG(FATAL) << "Unexpected memory barrier " << kind;
2979  }
2980  __ dmb(flavor);
2981}
2982
// Atomically loads a 64-bit value at addr+offset into out_lo/out_hi using
// ldrexd. ldrexd takes no immediate offset, so a non-zero offset is folded
// into IP first; out_lo is safe to use as scratch for that since ldrexd
// overwrites it anyway.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
2994
// Atomically stores value_lo/value_hi at addr+offset with an ldrexd/strexd
// retry loop. temp1/temp2 receive the discarded ldrexd result; temp1 then
// holds the strexd success flag (0 on success). The implicit null check is
// recorded on the ldrexd, the first access of the object.
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  // strexd takes no immediate offset, so fold a non-zero offset into IP.
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  // Retry until the exclusive store succeeds (temp1 == 0).
  __ cmp(temp1, ShifterOperand(0));
  __ b(&fail, NE);
}
3017
// Builds locations shared by instance/static field stores. The AddTemp order
// matters: the codegen side indexes temps positionally (0/1 for the write
// barrier or the ldrexd/strexd scratch pair, 2/3 = R2/R3 for the extra
// double copy).
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  // 64-bit volatile stores need the ldrexd/strexd loop only when the CPU has
  // no atomic ldrd/strd.
  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3058
// Emits code shared by instance/static field stores. Volatile stores are
// bracketed by an any-store barrier before and an any-any barrier after;
// wide (long/double) volatile stores without atomic ldrd/strd go through
// GenerateWideAtomicStore. Reference stores mark the GC card afterwards.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // Wide atomic path: ldrexd/strexd loop; temps 0/1 are its scratch pair.
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Move the double to core registers (temps 0/1), then use the wide
        // atomic path with temps 2/3 (= R2/R3) as the strexd scratch pair.
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3158
3159void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
3160  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3161  LocationSummary* locations =
3162      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3163  locations->SetInAt(0, Location::RequiresRegister());
3164
3165  bool volatile_for_double = field_info.IsVolatile()
3166      && (field_info.GetFieldType() == Primitive::kPrimDouble)
3167      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3168  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);
3169
3170  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3171    locations->SetOut(Location::RequiresFpuRegister());
3172  } else {
3173    locations->SetOut(Location::RequiresRegister(),
3174                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
3175  }
3176  if (volatile_for_double) {
3177    // Arm encoding have some additional constraints for ldrexd/strexd:
3178    // - registers need to be consecutive
3179    // - the first register should be even but not R14.
3180    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3181    // enable Arm encoding.
3182    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3183    locations->AddTemp(Location::RequiresRegister());
3184    locations->AddTemp(Location::RequiresRegister());
3185  }
3186}
3187
// Emits the load for an instance or static field get. The first input register
// holds the base object; the field lives at `offset` bytes from it. Volatile
// gets are followed by a load/any memory barrier.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      // Volatile 64-bit loads without single-copy-atomic ldrd/strd go through
      // the exclusive-load helper; otherwise a plain ldrd suffices.
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Same atomicity concern as kPrimLong: load both halves into core
        // temps atomically, then move them into the D register. The null
        // check is recorded here, right after the (possibly faulting) load.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch. For every other type the last emitted
  // instruction is the load itself, so the implicit null check can be
  // recorded here.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    // Volatile gets must not be reordered with subsequent memory accesses.
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}
3272
// Instance field stores share their location constraints with static stores.
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3276
// Delegates to the common field-store emitter; whether the stored value may be
// null lets the card-marking code skip the null test when possible.
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3280
// Instance field loads share their location constraints with static loads.
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3284
// Delegates to the common field-load emitter.
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3288
// Static field loads share their location constraints with instance loads.
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3292
// Delegates to the common field-load emitter.
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3296
// Static field stores share their location constraints with instance stores.
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3300
// Delegates to the common field-store emitter; whether the stored value may be
// null lets the card-marking code skip the null test when possible.
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3304
3305void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
3306  LocationSummary* locations =
3307      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3308  locations->SetInAt(0, Location::RequiresRegister());
3309  if (instruction->HasUses()) {
3310    locations->SetOut(Location::SameAsFirstInput());
3311  }
3312}
3313
3314void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
3315  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3316    return;
3317  }
3318  Location obj = instruction->GetLocations()->InAt(0);
3319
3320  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
3321  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3322}
3323
3324void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3325  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3326  codegen_->AddSlowPath(slow_path);
3327
3328  LocationSummary* locations = instruction->GetLocations();
3329  Location obj = locations->InAt(0);
3330
3331  __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
3332  __ b(slow_path->GetEntryLabel(), EQ);
3333}
3334
3335void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3336  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
3337    GenerateImplicitNullCheck(instruction);
3338  } else {
3339    GenerateExplicitNullCheck(instruction);
3340  }
3341}
3342
3343void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3344  LocationSummary* locations =
3345      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3346  locations->SetInAt(0, Location::RequiresRegister());
3347  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3348  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3349    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3350  } else {
3351    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3352  }
3353}
3354
// Emits the element load for HArrayGet. A constant index is folded into the
// load offset; a register index is scaled and added to the array base in IP.
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  switch (instruction->GetType()) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        // Fold (index << scale) + data_offset into the load offset.
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        // Register index: scale by the element size while adding to the base.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded as plain 32-bit words (compressed-pointer-free heap).
      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
                    "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  // The load emitted above may fault on a null array reference; record its PC
  // so the fault can be attributed to this instruction.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
3482
3483void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3484  Primitive::Type value_type = instruction->GetComponentType();
3485
3486  bool needs_write_barrier =
3487      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3488  bool needs_runtime_call = instruction->NeedsTypeCheck();
3489
3490  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3491      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3492  if (needs_runtime_call) {
3493    InvokeRuntimeCallingConvention calling_convention;
3494    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3495    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3496    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3497  } else {
3498    locations->SetInAt(0, Location::RequiresRegister());
3499    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3500    if (Primitive::IsFloatingPointType(value_type)) {
3501      locations->SetInAt(2, Location::RequiresFpuRegister());
3502    } else {
3503      locations->SetInAt(2, Location::RequiresRegister());
3504    }
3505
3506    if (needs_write_barrier) {
3507      // Temporary registers for the write barrier.
3508      locations->AddTemp(Location::RequiresRegister());
3509      locations->AddTemp(Location::RequiresRegister());
3510    }
3511  }
3512}
3513
// Emits the element store for HArraySet. Reference stores that require a type
// check call the runtime; all other stores are emitted inline, with a GC card
// mark when a non-null reference is written.
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        // Fold (index << scale) + data_offset into the store offset.
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        // Register index: scale by the element size while adding to the base.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        Register value = locations->InAt(2).AsRegister<Register>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, value, IP, data_offset);
        }
        // Record the null check here, right after the store, since the card
        // mark below emits additional instructions.
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
        }
      } else {
        // The store needs a type check: let the runtime aput-object entrypoint
        // perform the check, the store, and the write barrier.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
3638
// The array reference comes in a register; the length goes out in a register
// that may reuse the input (no overlap required).
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
3645
3646void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3647  LocationSummary* locations = instruction->GetLocations();
3648  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3649  Register obj = locations->InAt(0).AsRegister<Register>();
3650  Register out = locations->Out().AsRegister<Register>();
3651  __ LoadFromOffset(kLoadWord, out, obj, offset);
3652  codegen_->MaybeRecordImplicitNullCheck(instruction);
3653}
3654
// Both inputs — index and length — must be in core registers; when the check
// has users it forwards its first input.
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3664
3665void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3666  LocationSummary* locations = instruction->GetLocations();
3667  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
3668      instruction, locations->InAt(0), locations->InAt(1));
3669  codegen_->AddSlowPath(slow_path);
3670
3671  Register index = locations->InAt(0).AsRegister<Register>();
3672  Register length = locations->InAt(1).AsRegister<Register>();
3673
3674  __ cmp(index, ShifterOperand(length));
3675  __ b(slow_path->GetEntryLabel(), CS);
3676}
3677
// Marks the card covering `object` as dirty so the GC revisits it after a
// reference store. `temp` and `card` are scratch registers; when `can_be_null`
// is true, a null `value` skips the marking entirely (storing null creates no
// reference to track).
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  Label is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // card = card table base (read from the current thread).
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  // temp = object >> kCardShift, the card index for this object.
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store at card_base + card_index. Note the stored value is the low byte of
  // `card` (the table base) itself — presumably the base address is chosen so
  // its LSB equals the dirty-card marker, avoiding a separate immediate load;
  // confirm against gc::accounting::CardTable.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
3694
// Temporaries carry no location constraints of their own.
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}
3698
// Temporaries emit no code of their own.
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}
3703
// Parallel moves are materialized after location building has run, so this
// visitor must never be reached; hitting it is a compiler bug.
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3708
// Parallel moves are lowered by the dedicated move resolver.
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3712
// Suspend checks have no register inputs or outputs, only a slow-path call.
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3716
3717void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
3718  HBasicBlock* block = instruction->GetBlock();
3719  if (block->GetLoopInformation() != nullptr) {
3720    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3721    // The back edge will generate the suspend check.
3722    return;
3723  }
3724  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3725    // The goto will generate the suspend check.
3726    return;
3727  }
3728  GenerateSuspendCheck(instruction, nullptr);
3729}
3730
// Emits a test of the current thread's flags and a branch to the suspend-check
// slow path when any flag is set. With a `successor` block the fast path jumps
// to it directly; without one the slow path returns right after the check.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    // First emission for this check: create and register its slow path.
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    // The slow path is shared across emissions of this check (e.g. multiple
    // back edges); all of them must agree on the successor.
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // IP = thread flags (16-bit field read from the thread register).
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  __ cmp(IP, ShifterOperand(0));
  // TODO: Figure out the branch offsets and use cbz/cbnz.
  if (successor == nullptr) {
    __ b(slow_path->GetEntryLabel(), NE);
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ b(codegen_->GetLabelOf(successor), EQ);
    __ b(slow_path->GetEntryLabel());
  }
}
3759
// The move resolver emits code through the code generator's assembler.
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  return codegen_->GetAssembler();
}
3763
3764void ParallelMoveResolverARM::EmitMove(size_t index) {
3765  MoveOperands* move = moves_.Get(index);
3766  Location source = move->GetSource();
3767  Location destination = move->GetDestination();
3768
3769  if (source.IsRegister()) {
3770    if (destination.IsRegister()) {
3771      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
3772    } else {
3773      DCHECK(destination.IsStackSlot());
3774      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
3775                       SP, destination.GetStackIndex());
3776    }
3777  } else if (source.IsStackSlot()) {
3778    if (destination.IsRegister()) {
3779      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
3780                        SP, source.GetStackIndex());
3781    } else if (destination.IsFpuRegister()) {
3782      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
3783    } else {
3784      DCHECK(destination.IsStackSlot());
3785      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
3786      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3787    }
3788  } else if (source.IsFpuRegister()) {
3789    if (destination.IsFpuRegister()) {
3790      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
3791    } else {
3792      DCHECK(destination.IsStackSlot());
3793      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
3794    }
3795  } else if (source.IsDoubleStackSlot()) {
3796    if (destination.IsDoubleStackSlot()) {
3797      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
3798      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
3799    } else if (destination.IsRegisterPair()) {
3800      DCHECK(ExpectedPairLayout(destination));
3801      __ LoadFromOffset(
3802          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
3803    } else {
3804      DCHECK(destination.IsFpuRegisterPair()) << destination;
3805      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
3806                         SP,
3807                         source.GetStackIndex());
3808    }
3809  } else if (source.IsRegisterPair()) {
3810    if (destination.IsRegisterPair()) {
3811      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
3812      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
3813    } else {
3814      DCHECK(destination.IsDoubleStackSlot()) << destination;
3815      DCHECK(ExpectedPairLayout(source));
3816      __ StoreToOffset(
3817          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
3818    }
3819  } else if (source.IsFpuRegisterPair()) {
3820    if (destination.IsFpuRegisterPair()) {
3821      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
3822               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
3823    } else {
3824      DCHECK(destination.IsDoubleStackSlot()) << destination;
3825      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
3826                        SP,
3827                        destination.GetStackIndex());
3828    }
3829  } else {
3830    DCHECK(source.IsConstant()) << source;
3831    HConstant* constant = source.GetConstant();
3832    if (constant->IsIntConstant() || constant->IsNullConstant()) {
3833      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
3834      if (destination.IsRegister()) {
3835        __ LoadImmediate(destination.AsRegister<Register>(), value);
3836      } else {
3837        DCHECK(destination.IsStackSlot());
3838        __ LoadImmediate(IP, value);
3839        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3840      }
3841    } else if (constant->IsLongConstant()) {
3842      int64_t value = constant->AsLongConstant()->GetValue();
3843      if (destination.IsRegisterPair()) {
3844        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
3845        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
3846      } else {
3847        DCHECK(destination.IsDoubleStackSlot()) << destination;
3848        __ LoadImmediate(IP, Low32Bits(value));
3849        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3850        __ LoadImmediate(IP, High32Bits(value));
3851        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
3852      }
3853    } else if (constant->IsDoubleConstant()) {
3854      double value = constant->AsDoubleConstant()->GetValue();
3855      if (destination.IsFpuRegisterPair()) {
3856        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
3857      } else {
3858        DCHECK(destination.IsDoubleStackSlot()) << destination;
3859        uint64_t int_value = bit_cast<uint64_t, double>(value);
3860        __ LoadImmediate(IP, Low32Bits(int_value));
3861        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3862        __ LoadImmediate(IP, High32Bits(int_value));
3863        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
3864      }
3865    } else {
3866      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
3867      float value = constant->AsFloatConstant()->GetValue();
3868      if (destination.IsFpuRegister()) {
3869        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
3870      } else {
3871        DCHECK(destination.IsStackSlot());
3872        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
3873        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
3874      }
3875    }
3876  }
3877}
3878
// Swaps the contents of core register `reg` with the stack slot at SP+mem.
// IP serves as the scratch register holding the original register value, so
// `reg` must not be IP.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
3884
// Swaps the contents of the two stack slots at SP+mem1 and SP+mem2.
// Two core scratch registers are needed: IP plus one provided by the scratch
// register scope, which may have to spill a register onto the stack.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  // If the scratch register was spilled, SP moved down one word, so both slot
  // offsets must be adjusted accordingly.
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
3895
// Swaps the values held by the source and destination locations of the move
// at `index`. IP and DTMP are used as temporaries, so neither may appear as a
// source or destination of a swap.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core register <-> core register, via IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // S register <-> S register, via IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core register pair <-> core register pair: park the source pair in DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core register pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    // Save the pair in DTMP, load the slot into the pair, then spill DTMP.
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // D register <-> D register, via DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // D register <-> double stack slot, via DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // S register <-> stack slot, via IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot: swap one word at a time.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
3968
// Saves core scratch register `reg` on the stack so it can be reused by the
// parallel move resolver.
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
3972
// Restores core scratch register `reg` previously saved by SpillScratch.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
3976
3977void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
3978  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
3979      ? LocationSummary::kCallOnSlowPath
3980      : LocationSummary::kNoCall;
3981  LocationSummary* locations =
3982      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
3983  locations->SetInAt(0, Location::RequiresRegister());
3984  locations->SetOut(Location::RequiresRegister());
3985}
3986
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // The class is the one declaring the current method: read it straight off
    // the ArtMethod; no runtime call can be needed.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index]
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));

    // The slow path resolves (and, if required, initializes) the class when
    // the dex cache entry is still null.
    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ cmp(out, ShifterOperand(0));
    __ b(slow_path->GetEntryLabel(), EQ);
    if (cls->MustGenerateClinitCheck()) {
      // Also verify the class is initialized; this binds the exit label.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
4016
4017void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4018  LocationSummary* locations =
4019      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4020  locations->SetInAt(0, Location::RequiresRegister());
4021  if (check->HasUses()) {
4022    locations->SetOut(Location::SameAsFirstInput());
4023  }
4024}
4025
4026void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
4027  // We assume the class is not null.
4028  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
4029      check->GetLoadClass(), check, check->GetDexPc(), true);
4030  codegen_->AddSlowPath(slow_path);
4031  GenerateClassInitializationCheck(slow_path,
4032                                   check->GetLocations()->InAt(0).AsRegister<Register>());
4033}
4034
// Branches to `slow_path` unless the class held in `class_reg` has reached
// the initialized status. Clobbers IP and binds the slow path's exit label.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCodeARM* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4045
4046void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4047  LocationSummary* locations =
4048      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4049  locations->SetInAt(0, Location::RequiresRegister());
4050  locations->SetOut(Location::RequiresRegister());
4051}
4052
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  // The slow path resolves the string when the dex cache entry is still null.
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_->dex_cache_strings_[string_index]
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  __ cmp(out, ShifterOperand(0));
  __ b(slow_path->GetEntryLabel(), EQ);
  __ Bind(slow_path->GetExitLabel());
}
4068
4069void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4070  LocationSummary* locations =
4071      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4072  locations->SetOut(Location::RequiresRegister());
4073}
4074
void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
  Register out = load->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
  // Load the pending exception from the thread (TR), then clear the field so
  // the exception is no longer considered pending. Clobbers IP.
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, offset);
}
4082
4083void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4084  LocationSummary* locations =
4085      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4086  InvokeRuntimeCallingConvention calling_convention;
4087  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4088}
4089
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  // Delegate to the runtime; the exception object is already in the first
  // runtime calling convention register (see the locations builder).
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4094
4095void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4096  LocationSummary::CallKind call_kind = instruction->IsClassFinal()
4097      ? LocationSummary::kNoCall
4098      : LocationSummary::kCallOnSlowPath;
4099  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4100  locations->SetInAt(0, Location::RequiresRegister());
4101  locations->SetInAt(1, Location::RequiresRegister());
4102  // The out register is used as a temporary, so it overlaps with the inputs.
4103  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4104}
4105
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Label done, zero;
  SlowPathCodeARM* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }
  // Compare the class of `obj` with `cls`. `out` doubles as a temporary here.
  __ LoadFromOffset(kLoadWord, out, obj, class_offset);
  __ cmp(out, ShifterOperand(cls));
  if (instruction->IsClassFinal()) {
    // Classes must be equal for the instanceof to succeed.
    __ b(&zero, NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
    codegen_->AddSlowPath(slow_path);
    __ b(slow_path->GetEntryLabel(), NE);
    __ LoadImmediate(out, 1);
    __ b(&done);
  }

  // The `zero` label is only reachable (and thus only bound) when either the
  // null check or the final-class comparison can branch to it.
  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
  __ Bind(&done);
}
4149
4150void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4151  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4152      instruction, LocationSummary::kCallOnSlowPath);
4153  locations->SetInAt(0, Location::RequiresRegister());
4154  locations->SetInAt(1, Location::RequiresRegister());
4155  locations->AddTemp(Location::RequiresRegister());
4156}
4157
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // The slow path performs the full type check (and throws on failure).
  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
  codegen_->AddSlowPath(slow_path);

  // avoid null check if we know obj is not null.
  // A null object trivially passes the cast, so branch straight to the exit.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, slow_path->GetExitLabel());
  }
  // Compare the class of `obj` with `cls`; fall back to the slow path when
  // the classes differ (which is not necessarily a failure).
  __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
  __ cmp(temp, ShifterOperand(cls));
  __ b(slow_path->GetEntryLabel(), NE);
  __ Bind(slow_path->GetExitLabel());
}
4179
4180void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4181  LocationSummary* locations =
4182      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4183  InvokeRuntimeCallingConvention calling_convention;
4184  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4185}
4186
4187void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4188  codegen_->InvokeRuntime(instruction->IsEnter()
4189        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4190      instruction,
4191      instruction->GetDexPc(),
4192      nullptr);
4193}
4194
// And/Or/Xor share identical register constraints; delegate to the helper.
void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4198
4199void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4200  LocationSummary* locations =
4201      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4202  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4203         || instruction->GetResultType() == Primitive::kPrimLong);
4204  locations->SetInAt(0, Location::RequiresRegister());
4205  locations->SetInAt(1, Location::RequiresRegister());
4206  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4207}
4208
// And/Or/Xor share one code generation path; delegate to the helper.
void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  HandleBitwiseOperation(instruction);
}

void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  HandleBitwiseOperation(instruction);
}
4220
// Emits code for And/Or/Xor. Ints use a single data-processing instruction;
// longs apply the operation to the low and high register-pair halves.
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations = instruction->GetLocations();

  if (instruction->GetResultType() == Primitive::kPrimInt) {
    Register first = locations->InAt(0).AsRegister<Register>();
    Register second = locations->InAt(1).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();
    if (instruction->IsAnd()) {
      __ and_(out, first, ShifterOperand(second));
    } else if (instruction->IsOr()) {
      __ orr(out, first, ShifterOperand(second));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out, first, ShifterOperand(second));
    }
  } else {
    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
    Location first = locations->InAt(0);
    Location second = locations->InAt(1);
    Location out = locations->Out();
    // Operate on the low words, then the high words.
    if (instruction->IsAnd()) {
      __ and_(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ and_(out.AsRegisterPairHigh<Register>(),
              first.AsRegisterPairHigh<Register>(),
              ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else if (instruction->IsOr()) {
      __ orr(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ orr(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out.AsRegisterPairLow<Register>(),
             first.AsRegisterPairLow<Register>(),
             ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ eor(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
    }
  }
}
4266
// Emits the call sequence for a static or direct invoke. `temp` is a scratch
// core register used to hold the callee ArtMethod*.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  if (invoke->IsStringInit()) {
    // String.<init> is replaced by a dedicated entrypoint read off the thread.
    Register reg = temp.AsRegister<Register>();
    // temp = thread->string_init_entrypoint
    __ LoadFromOffset(kLoadWord, reg, TR, invoke->GetStringInitOffset());
    // LR = temp[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg,
                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                          kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  } else if (invoke->IsRecursive()) {
    // Direct recursive call: branch back to this method's frame entry.
    __ bl(GetFrameEntryLabel());
  } else {
    // Resolve the callee through the current method's dex cache.
    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
    Register method_reg;
    Register reg = temp.AsRegister<Register>();
    if (current_method.IsRegister()) {
      method_reg = current_method.AsRegister<Register>();
    } else {
      // Intrinsified invokes do not carry the current method as an input;
      // reload it from its stack home instead.
      DCHECK(invoke->GetLocations()->Intrinsified());
      DCHECK(!current_method.IsValid());
      method_reg = reg;
      __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
    }
    // reg = current_method->dex_cache_resolved_methods_;
    __ LoadFromOffset(
        kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
    // reg = reg[index_in_cache]
    __ LoadFromOffset(
        kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
    // LR = reg[offset_of_quick_compiled_code]
    __ LoadFromOffset(kLoadWord, LR, reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        kArmWordSize).Int32Value());
    // LR()
    __ blx(LR);
  }

  DCHECK(!IsLeafMethod());
}
4314
// HBoundType carries type information only and must have been eliminated by
// the prepare-for-register-allocation pass; reaching either visitor is a bug.
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
  // Nothing to do, this should be removed during prepare for register allocator.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
4326
4327}  // namespace arm
4328}  // namespace art
4329