code_generator_arm.cc revision 4b8f1ecd3aa5a29ec1463ff88fee9db365f257dc
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_arm.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {

namespace arm {

static bool ExpectedPairLayout(Location location) {
  // We expect this for both core and fpu register pairs.
  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
}

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()

class NullCheckSlowPathARM : public SlowPathCode {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};

class DivZeroCheckSlowPathARM : public SlowPathCode {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};

class SuspendCheckSlowPathARM : public SlowPathCode {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ b(GetReturnLabel());
    } else {
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};

class BoundsCheckSlowPathARM : public SlowPathCode {
 public:
  explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();

    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};

class LoadClassSlowPathARM : public SlowPathCode {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check.)
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};

class LoadStringSlowPathARM : public SlowPathCode {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};

class TypeCheckSlowPathARM : public SlowPathCode {
 public:
  TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
      : instruction_(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());

    if (instruction_->IsCheckCast()) {
      // The codegen for the instruction overwrites `temp`, so put it back in place.
      Register obj = locations->InAt(0).AsRegister<Register>();
      Register temp = locations->GetTemp(0).AsRegister<Register>();
      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
      __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
      __ MaybeUnpoisonHeapReference(temp);
    }

    if (!is_fatal_) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        object_class,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ b(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }

  bool IsFatal() const OVERRIDE { return is_fatal_; }

 private:
  HInstruction* const instruction_;
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};

class DeoptimizationSlowPathARM : public SlowPathCode {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};

class ArraySetSlowPathARM : public SlowPathCode {
 public:
  explicit ArraySetSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    parallel_move.AddMove(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(2),
        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
        Primitive::kPrimNot,
        nullptr);
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                               instruction_,
                               instruction_->GetDexPc(),
                               this);
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
};

#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->

inline Condition ARMCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return EQ;
    case kCondNE: return NE;
    case kCondLT: return LT;
    case kCondLE: return LE;
    case kCondGT: return GT;
    case kCondGE: return GE;
    case kCondB:  return LO;
    case kCondBE: return LS;
    case kCondA:  return HI;
    case kCondAE: return HS;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

// Maps signed condition to unsigned condition.
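// This is used for the low-word compare of a long comparison (see
// GenerateLongComparesAndJumps below): once the high words compare equal,
// the low words must be compared as unsigned values. For example, the signed
// long compare 0x0000000180000000 < 0x0000000100000001 is false, but a
// signed compare of the low words (0x80000000 vs 0x00000001) would claim
// otherwise; LO/HI give the right answer.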
inline Condition ARMUnsignedCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return EQ;
    case kCondNE: return NE;
    // Signed to unsigned.
    case kCondLT: return LO;
    case kCondLE: return LS;
    case kCondGT: return HI;
    case kCondGE: return HS;
    // Unsigned conditions remain unchanged.
    case kCondB:  return LO;
    case kCondBE: return LS;
    case kCondA:  return HI;
    case kCondAE: return HS;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Register(reg);
}

void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << SRegister(reg);
}

size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}

size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}

size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}

size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}

CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options,
                                   OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features),
      method_patches_(MethodReferenceComparator(),
                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      call_patches_(MethodReferenceComparator(),
                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
  // Always save the LR register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(LR));
}

void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();
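  // FinalizeCode() may widen 16-bit Thumb2 branches into 32-bit encodings,
  // shifting any code emitted after them; all recorded positions below are
  // therefore remapped through GetAdjustedPosition().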

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust native pc offsets of block labels.
  for (HBasicBlock* block : *block_order_) {
    // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
    // FirstNonEmptyBlock() which could lead to adjusting a label more than once.
    DCHECK_LT(block->GetBlockId(), GetGraph()->GetBlocks().size());
    Label* block_label = &block_labels_[block->GetBlockId()];
    DCHECK_EQ(block_label->IsBound(), !block->IsSingleJump());
    if (block_label->IsBound()) {
      __ AdjustLabelPosition(block_label);
    }
  }
  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }
  // Adjust pc offsets for relative call patches.
  for (MethodPatchInfo<Label>& info : relative_call_patches_) {
    __ AdjustLabelPosition(&info.label);
  }

  CodeGenerator::Finalize(allocator);
}

Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}

void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
  // Don't allocate the Dalvik-style register pair (R1_R2) used for argument passing.
  blocked_register_pairs_[R1_R2] = true;

  // Stack register, LR and PC are always reserved.
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[LR] = true;
  blocked_core_registers_[PC] = true;

  // Reserve thread register.
  blocked_core_registers_[TR] = true;

  // Reserve temp register.
  blocked_core_registers_[IP] = true;

  if (is_baseline) {
    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
    }

    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
  }

  if (is_baseline || GetGraph()->IsDebuggable()) {
    // Stubs do not save callee-save floating point registers. If the graph
    // is debuggable, we need to deal with these registers differently. For
    // now, just block them.
    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
    }
  }

  UpdateBlockedPairRegisters();
}

void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
    ArmManagedRegister current =
        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
    if (blocked_core_registers_[current.AsRegisterPairLow()]
        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
      blocked_register_pairs_[i] = true;
    }
  }
}

InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on Thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // an SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
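  // For example (illustrative): if only S16 and S19 were allocated, S17 and
  // S18 are added to the mask so that a single vpush/vpop of S16-S19 works.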
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    for (uint32_t i = least_significant_bit + 1; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}

void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }
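  // The load above probes the stack guard region: if the stack has
  // overflowed, the load faults, and with implicit stack overflow checks
  // enabled (see the DCHECK above) the fault handler turns the signal into a
  // StackOverflowError using the pc recorded here.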

  __ PushList(core_spill_mask_);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}

void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // Pop LR into PC to return.
  DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
  uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
  __ PopList(pop_mask);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
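      // For example, with R1-R3 holding the first GP arguments: a long as the
      // first argument would start at R1, so it is bumped to the aligned pair
      // R2_R3 and R1 is simply left unused.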
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));

        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}

Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      return Location::RegisterLocation(R0);
    }

    case Primitive::kPrimFloat: {
      return Location::FpuRegisterLocation(S0);
    }

    case Primitive::kPrimLong: {
      return Location::RegisterPairLocation(R0, R1);
    }

    case Primitive::kPrimDouble: {
      return Location::FpuRegisterPairLocation(S0, S1);
    }

    case Primitive::kPrimVoid:
      return Location();
  }

  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot()) << source;
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}

void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else if (source.IsFpuRegisterPair()) {
      __ vmovrrd(destination.AsRegisterPairLow<Register>(),
                 destination.AsRegisterPairHigh<Register>(),
                 FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(source.IsDoubleStackSlot());
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else if (source.IsRegisterPair()) {
      __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                 source.AsRegisterPairLow<Register>(),
                 source.AsRegisterPairHigh<Register>());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
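      // An R1/R2 pair is stored word by word below, presumably because the
      // STRD encoding behind kStoreWordPair needs an even-numbered first
      // register (an assumption about why R1 is special-cased here).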
      if (source.AsRegisterPairLow<Register>() == R1) {
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}

void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}

void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadImmediate(location.AsRegister<Register>(), value);
}

void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
  if (Primitive::Is64BitType(dst_type)) {
    Move64(dst, src);
  } else {
    Move32(dst, src);
  }
}

void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else if (location.IsRegisterPair()) {
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}

void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
  ShifterOperand operand;
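  // Compare against the immediate directly when the assembler can encode it;
  // otherwise materialize the constant in the scratch register IP first.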
  if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
    __ cmp(left, operand);
  } else {
    Register temp = IP;
    __ LoadImmediate(temp, right);
    __ cmp(left, ShifterOperand(temp));
  }
}

void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
                                                  Label* true_label,
                                                  Label* false_label) {
  __ vmstat();  // Transfer FP status register to ARM APSR.
  // TODO: merge into a single branch (except "equal or unordered" and "not equal").
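  // Example: a '<' compare must be false on NaN, so the unordered case (V set
  // after vmstat) branches to `false_label` before the ordered test below.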
  if (cond->IsFPConditionTrueIfNaN()) {
    __ b(true_label, VS);  // VS for unordered.
  } else if (cond->IsFPConditionFalseIfNaN()) {
    __ b(false_label, VS);  // VS for unordered.
  }
  __ b(true_label, ARMCondition(cond->GetCondition()));
}

void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
                                                               Label* true_label,
                                                               Label* false_label) {
  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  IfCondition if_cond = cond->GetCondition();

  Register left_high = left.AsRegisterPairHigh<Register>();
  Register left_low = left.AsRegisterPairLow<Register>();
  IfCondition true_high_cond = if_cond;
  IfCondition false_high_cond = cond->GetOppositeCondition();
  Condition final_condition = ARMUnsignedCondition(if_cond);  // unsigned on lower part

  // Set the conditions for the test, remembering that == needs to be
  // decided using the low words.
  // TODO: consider avoiding jumps with temporary and CMP low+SBC high
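  // Example: for kCondLE the high words decide with LT (strictly less ->
  // true) and GT (strictly greater -> false); only when the high words are
  // equal does the unsigned low-word compare below make the final decision.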
  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      // Nothing to do.
      break;
    case kCondLT:
      false_high_cond = kCondGT;
      break;
    case kCondLE:
      true_high_cond = kCondLT;
      break;
    case kCondGT:
      false_high_cond = kCondLT;
      break;
    case kCondGE:
      true_high_cond = kCondGT;
      break;
    case kCondB:
      false_high_cond = kCondA;
      break;
    case kCondBE:
      true_high_cond = kCondB;
      break;
    case kCondA:
      false_high_cond = kCondB;
      break;
    case kCondAE:
      true_high_cond = kCondA;
      break;
  }
  if (right.IsConstant()) {
    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
    int32_t val_low = Low32Bits(value);
    int32_t val_high = High32Bits(value);

    GenerateCompareWithImmediate(left_high, val_high);
    if (if_cond == kCondNE) {
      __ b(true_label, ARMCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      __ b(false_label, ARMCondition(false_high_cond));
    } else {
      __ b(true_label, ARMCondition(true_high_cond));
      __ b(false_label, ARMCondition(false_high_cond));
    }
    // The high words are equal, so compare the low words.
    GenerateCompareWithImmediate(left_low, val_low);
  } else {
    Register right_high = right.AsRegisterPairHigh<Register>();
    Register right_low = right.AsRegisterPairLow<Register>();

    __ cmp(left_high, ShifterOperand(right_high));
    if (if_cond == kCondNE) {
      __ b(true_label, ARMCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      __ b(false_label, ARMCondition(false_high_cond));
    } else {
      __ b(true_label, ARMCondition(true_high_cond));
      __ b(false_label, ARMCondition(false_high_cond));
    }
    // The high words are equal, so compare the low words.
    __ cmp(left_low, ShifterOperand(right_low));
  }
  // The last comparison might be unsigned.
  // TODO: optimize cases where this is always true/false
  __ b(true_label, final_condition);
}

void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
                                                               HCondition* condition,
                                                               Label* true_target,
                                                               Label* false_target,
                                                               Label* always_true_target) {
  LocationSummary* locations = condition->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  // We don't want a null true_target.
  if (true_target == nullptr) {
    true_target = always_true_target;
  }
  bool falls_through = (false_target == nullptr);

  // FP compares don't like null false_targets.
  if (false_target == nullptr) {
    false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  }

  Primitive::Type type = condition->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(condition, true_target, false_target);
      break;
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }

  if (!falls_through) {
    __ b(false_target);
  }
}

void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // The condition has been materialized; compare its output to 0.
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ CompareAndBranchIfNonZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
                                   true_target);
    } else {
      // The condition has not been materialized; use its inputs as the
      // comparison and its condition as the branch condition.
      Primitive::Type type =
          cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
      // Is this a long or FP comparison that has been folded into the HCondition?
      if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
        // Generate the comparison directly.
        GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
                                     true_target, false_target, always_true_target);
        return;
      }

      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      Location right = locations->InAt(1);
      if (right.IsRegister()) {
        __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}

void LocationsBuilderARM::VisitIf(HIf* if_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCode* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathARM(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderARM::VisitCondition(HCondition* cond) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
  // Handle the long/FP comparisons made in instruction simplification.
  switch (cond->InputAt(0)->GetType()) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      }
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
      break;

    default:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
  }
}

void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  if (!cond->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  Register out = locations->Out().AsRegister<Register>();
  Label true_label, false_label;

  switch (cond->InputAt(0)->GetType()) {
    default: {
      // Integer case.
      if (right.IsRegister()) {
        __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left.AsRegister<Register>(),
                                     CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
1445      __ it(ARMCondition(cond->GetCondition()), kItElse);
1446      __ mov(out, ShifterOperand(1),
1447             ARMCondition(cond->GetCondition()));
1448      __ mov(out, ShifterOperand(0),
1449             ARMCondition(cond->GetOppositeCondition()));
1450      return;
1451    }
1452    case Primitive::kPrimLong:
1453      GenerateLongComparesAndJumps(cond, &true_label, &false_label);
1454      break;
1455    case Primitive::kPrimFloat:
1456      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
1457      GenerateFPJumps(cond, &true_label, &false_label);
1458      break;
1459    case Primitive::kPrimDouble:
1460      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
1461               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
1462      GenerateFPJumps(cond, &true_label, &false_label);
1463      break;
1464  }
1465
1466  // Convert the jumps into the result.
1467  Label done_label;
1468
1469  // False case: result = 0.
1470  __ Bind(&false_label);
1471  __ LoadImmediate(out, 0);
1472  __ b(&done_label);
1473
1474  // True case: result = 1.
1475  __ Bind(&true_label);
1476  __ LoadImmediate(out, 1);
1477  __ Bind(&done_label);
1478}
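
// Illustrative sketch (hypothetical register assignment): materializing
// `a < b` in the integer case with a in R0, b in R1 and the result in R2
// produces a Thumb-2 ITE block:
//   cmp   r0, r1
//   ite   lt
//   movlt r2, #1      @ condition holds: out = 1
//   movge r2, #0      @ opposite condition: out = 0
// Long and FP inputs cannot be decided by one cmp, so they branch to
// true_label/false_label and load the 0/1 constant at the labels bound at
// the end of the function.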
1479
1480void LocationsBuilderARM::VisitEqual(HEqual* comp) {
1481  VisitCondition(comp);
1482}
1483
1484void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
1485  VisitCondition(comp);
1486}
1487
1488void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
1489  VisitCondition(comp);
1490}
1491
1492void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
1493  VisitCondition(comp);
1494}
1495
1496void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
1497  VisitCondition(comp);
1498}
1499
1500void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
1501  VisitCondition(comp);
1502}
1503
1504void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
1505  VisitCondition(comp);
1506}
1507
1508void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
1509  VisitCondition(comp);
1510}
1511
1512void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
1513  VisitCondition(comp);
1514}
1515
1516void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
1517  VisitCondition(comp);
1518}
1519
1520void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
1521  VisitCondition(comp);
1522}
1523
1524void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
1525  VisitCondition(comp);
1526}
1527
1528void LocationsBuilderARM::VisitBelow(HBelow* comp) {
1529  VisitCondition(comp);
1530}
1531
1532void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
1533  VisitCondition(comp);
1534}
1535
1536void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
1537  VisitCondition(comp);
1538}
1539
1540void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
1541  VisitCondition(comp);
1542}
1543
1544void LocationsBuilderARM::VisitAbove(HAbove* comp) {
1545  VisitCondition(comp);
1546}
1547
1548void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
1549  VisitCondition(comp);
1550}
1551
1552void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
1553  VisitCondition(comp);
1554}
1555
1556void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
1557  VisitCondition(comp);
1558}
1559
1560void LocationsBuilderARM::VisitLocal(HLocal* local) {
1561  local->SetLocations(nullptr);
1562}
1563
1564void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
1565  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
1566}
1567
1568void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
1569  load->SetLocations(nullptr);
1570}
1571
1572void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
1573  // Nothing to do, this is driven by the code generator.
1574}
1575
1576void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1577  LocationSummary* locations =
1578      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1579  switch (store->InputAt(1)->GetType()) {
1580    case Primitive::kPrimBoolean:
1581    case Primitive::kPrimByte:
1582    case Primitive::kPrimChar:
1583    case Primitive::kPrimShort:
1584    case Primitive::kPrimInt:
1585    case Primitive::kPrimNot:
1586    case Primitive::kPrimFloat:
1587      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1588      break;
1589
1590    case Primitive::kPrimLong:
1591    case Primitive::kPrimDouble:
1592      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1593      break;
1594
1595    default:
1596      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1597  }
1598}
1599
1600void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
1601}
1602
1603void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
1604  LocationSummary* locations =
1605      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1606  locations->SetOut(Location::ConstantLocation(constant));
1607}
1608
1609void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
1610  // Will be generated at use site.
1611}
1612
1613void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
1614  LocationSummary* locations =
1615      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1616  locations->SetOut(Location::ConstantLocation(constant));
1617}
1618
1619void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
1620  // Will be generated at use site.
1621}
1622
1623void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
1624  LocationSummary* locations =
1625      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1626  locations->SetOut(Location::ConstantLocation(constant));
1627}
1628
1629void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
1630  // Will be generated at use site.
1631}
1632
1633void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
1634  LocationSummary* locations =
1635      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1636  locations->SetOut(Location::ConstantLocation(constant));
1637}
1638
1639void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
1640  // Will be generated at use site.
1641}
1642
1643void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
1644  LocationSummary* locations =
1645      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1646  locations->SetOut(Location::ConstantLocation(constant));
1647}
1648
1649void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
1650  // Will be generated at use site.
1651}
1652
1653void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
1654  memory_barrier->SetLocations(nullptr);
1655}
1656
1657void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
1658  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
1659}
1660
1661void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
1662  ret->SetLocations(nullptr);
1663}
1664
1665void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
1666  codegen_->GenerateFrameExit();
1667}
1668
1669void LocationsBuilderARM::VisitReturn(HReturn* ret) {
1670  LocationSummary* locations =
1671      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
1672  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
1673}
1674
1675void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
1676  codegen_->GenerateFrameExit();
1677}
1678
1679void LocationsBuilderARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
1680  // The trampoline uses the same calling convention as a normal dex call,
1681  // except that instead of being loaded with the target Method*, arg0/r0
1682  // contains the method_idx.
1683  HandleInvoke(invoke);
1684}
1685
1686void InstructionCodeGeneratorARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
1687  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
1688}
1689
1690void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1691  // When we do not run baseline, explicit clinit checks triggered by static
1692  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1693  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1694
1695  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1696                                         codegen_->GetAssembler(),
1697                                         codegen_->GetInstructionSetFeatures());
1698  if (intrinsic.TryDispatch(invoke)) {
1699    return;
1700  }
1701
1702  HandleInvoke(invoke);
1703}
1704
1705static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1706  if (invoke->GetLocations()->Intrinsified()) {
1707    IntrinsicCodeGeneratorARM intrinsic(codegen);
1708    intrinsic.Dispatch(invoke);
1709    return true;
1710  }
1711  return false;
1712}
1713
1714void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1715  // When we do not run baseline, explicit clinit checks triggered by static
1716  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1717  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1718
1719  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1720    return;
1721  }
1722
1723  LocationSummary* locations = invoke->GetLocations();
1724  codegen_->GenerateStaticOrDirectCall(
1725      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
1726  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1727}
1728
1729void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
1730  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
1731  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
1732}
1733
1734void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1735  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1736                                         codegen_->GetAssembler(),
1737                                         codegen_->GetInstructionSetFeatures());
1738  if (intrinsic.TryDispatch(invoke)) {
1739    return;
1740  }
1741
1742  HandleInvoke(invoke);
1743}
1744
1745void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1746  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1747    return;
1748  }
1749
1750  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
1751  DCHECK(!codegen_->IsLeafMethod());
1752  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1753}
1754
1755void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
1756  HandleInvoke(invoke);
1757  // Add the hidden argument.
1758  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
1759}
1760
1761void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
1762  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
1763  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
1764  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
1765      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
1766  LocationSummary* locations = invoke->GetLocations();
1767  Location receiver = locations->InAt(0);
1768  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
1769
1770  // Set the hidden argument.
1771  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
1772                   invoke->GetDexMethodIndex());
1773
1774  // temp = object->GetClass();
1775  if (receiver.IsStackSlot()) {
1776    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
1777    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
1778  } else {
1779    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
1780  }
1781  codegen_->MaybeRecordImplicitNullCheck(invoke);
1782  __ MaybeUnpoisonHeapReference(temp);
1783  // temp = temp->GetImtEntryAt(method_offset);
1784  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
1785      kArmWordSize).Int32Value();
1786  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
1787  // LR = temp->GetEntryPoint();
1788  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
1789  // LR();
1790  __ blx(LR);
1791  DCHECK(!codegen_->IsLeafMethod());
1792  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1793}
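
// Dispatch summary: load the receiver's class, index its embedded IMT at
// (imt_index % kImtSize), and call through the method's quick entry point.
// R12 (GetTemp(1) above) carries the dex method index as a hidden argument
// so the callee (for example, an IMT conflict stub) can identify which
// interface method was actually intended.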
1794
1795void LocationsBuilderARM::VisitNeg(HNeg* neg) {
1796  LocationSummary* locations =
1797      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
1798  switch (neg->GetResultType()) {
1799    case Primitive::kPrimInt: {
1800      locations->SetInAt(0, Location::RequiresRegister());
1801      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1802      break;
1803    }
1804    case Primitive::kPrimLong: {
1805      locations->SetInAt(0, Location::RequiresRegister());
1806      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1807      break;
1808    }
1809
1810    case Primitive::kPrimFloat:
1811    case Primitive::kPrimDouble:
1812      locations->SetInAt(0, Location::RequiresFpuRegister());
1813      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1814      break;
1815
1816    default:
1817      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
1818  }
1819}
1820
1821void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
1822  LocationSummary* locations = neg->GetLocations();
1823  Location out = locations->Out();
1824  Location in = locations->InAt(0);
1825  switch (neg->GetResultType()) {
1826    case Primitive::kPrimInt:
1827      DCHECK(in.IsRegister());
1828      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
1829      break;
1830
1831    case Primitive::kPrimLong:
1832      DCHECK(in.IsRegisterPair());
1833      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
1834      __ rsbs(out.AsRegisterPairLow<Register>(),
1835              in.AsRegisterPairLow<Register>(),
1836              ShifterOperand(0));
1837      // We cannot emit an RSC (Reverse Subtract with Carry)
1838      // instruction here, as it does not exist in the Thumb-2
1839      // instruction set. We use the following approach
1840      // with SBC and SUB instead.
1841      //
1842      // out.hi = -C
1843      __ sbc(out.AsRegisterPairHigh<Register>(),
1844             out.AsRegisterPairHigh<Register>(),
1845             ShifterOperand(out.AsRegisterPairHigh<Register>()));
1846      // out.hi = out.hi - in.hi
1847      __ sub(out.AsRegisterPairHigh<Register>(),
1848             out.AsRegisterPairHigh<Register>(),
1849             ShifterOperand(in.AsRegisterPairHigh<Register>()));
1850      break;
1851
1852    case Primitive::kPrimFloat:
1853      DCHECK(in.IsFpuRegister());
1854      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
1855      break;
1856
1857    case Primitive::kPrimDouble:
1858      DCHECK(in.IsFpuRegisterPair());
1859      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
1860               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
1861      break;
1862
1863    default:
1864      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
1865  }
1866}
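
// Worked example for the long case (values are illustrative): negating
// in = 0x0000000000000001:
//   rsbs out.lo, in.lo, #0       @ 0 - 1 = 0xFFFFFFFF, borrow, so C = 0
//   sbc  out.hi, out.hi, out.hi  @ out.hi - out.hi - !C = -1 = 0xFFFFFFFF
//   sub  out.hi, out.hi, in.hi   @ 0xFFFFFFFF - 0 = 0xFFFFFFFF
// yielding 0xFFFFFFFFFFFFFFFF, i.e. -1 as expected; when no borrow occurs
// (C = 1), the sbc leaves out.hi at 0 before the final subtraction.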
1867
1868void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1869  Primitive::Type result_type = conversion->GetResultType();
1870  Primitive::Type input_type = conversion->GetInputType();
1871  DCHECK_NE(result_type, input_type);
1872
1873  // The float-to-long, double-to-long and long-to-float type conversions
1874  // rely on a call to the runtime.
1875  LocationSummary::CallKind call_kind =
1876      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1877        && result_type == Primitive::kPrimLong)
1878       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
1879      ? LocationSummary::kCall
1880      : LocationSummary::kNoCall;
1881  LocationSummary* locations =
1882      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1883
1884  // The Java language does not allow treating boolean as an integral type, but
1885  // our bit representation makes it safe.
1886
1887  switch (result_type) {
1888    case Primitive::kPrimByte:
1889      switch (input_type) {
1890        case Primitive::kPrimBoolean:
1891          // Boolean input is a result of code transformations.
1892        case Primitive::kPrimShort:
1893        case Primitive::kPrimInt:
1894        case Primitive::kPrimChar:
1895          // Processing a Dex `int-to-byte' instruction.
1896          locations->SetInAt(0, Location::RequiresRegister());
1897          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1898          break;
1899
1900        default:
1901          LOG(FATAL) << "Unexpected type conversion from " << input_type
1902                     << " to " << result_type;
1903      }
1904      break;
1905
1906    case Primitive::kPrimShort:
1907      switch (input_type) {
1908        case Primitive::kPrimBoolean:
1909          // Boolean input is a result of code transformations.
1910        case Primitive::kPrimByte:
1911        case Primitive::kPrimInt:
1912        case Primitive::kPrimChar:
1913          // Processing a Dex `int-to-short' instruction.
1914          locations->SetInAt(0, Location::RequiresRegister());
1915          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1916          break;
1917
1918        default:
1919          LOG(FATAL) << "Unexpected type conversion from " << input_type
1920                     << " to " << result_type;
1921      }
1922      break;
1923
1924    case Primitive::kPrimInt:
1925      switch (input_type) {
1926        case Primitive::kPrimLong:
1927          // Processing a Dex `long-to-int' instruction.
1928          locations->SetInAt(0, Location::Any());
1929          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1930          break;
1931
1932        case Primitive::kPrimFloat:
1933          // Processing a Dex `float-to-int' instruction.
1934          locations->SetInAt(0, Location::RequiresFpuRegister());
1935          locations->SetOut(Location::RequiresRegister());
1936          locations->AddTemp(Location::RequiresFpuRegister());
1937          break;
1938
1939        case Primitive::kPrimDouble:
1940          // Processing a Dex `double-to-int' instruction.
1941          locations->SetInAt(0, Location::RequiresFpuRegister());
1942          locations->SetOut(Location::RequiresRegister());
1943          locations->AddTemp(Location::RequiresFpuRegister());
1944          break;
1945
1946        default:
1947          LOG(FATAL) << "Unexpected type conversion from " << input_type
1948                     << " to " << result_type;
1949      }
1950      break;
1951
1952    case Primitive::kPrimLong:
1953      switch (input_type) {
1954        case Primitive::kPrimBoolean:
1955          // Boolean input is a result of code transformations.
1956        case Primitive::kPrimByte:
1957        case Primitive::kPrimShort:
1958        case Primitive::kPrimInt:
1959        case Primitive::kPrimChar:
1960          // Processing a Dex `int-to-long' instruction.
1961          locations->SetInAt(0, Location::RequiresRegister());
1962          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1963          break;
1964
1965        case Primitive::kPrimFloat: {
1966          // Processing a Dex `float-to-long' instruction.
1967          InvokeRuntimeCallingConvention calling_convention;
1968          locations->SetInAt(0, Location::FpuRegisterLocation(
1969              calling_convention.GetFpuRegisterAt(0)));
1970          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1971          break;
1972        }
1973
1974        case Primitive::kPrimDouble: {
1975          // Processing a Dex `double-to-long' instruction.
1976          InvokeRuntimeCallingConvention calling_convention;
1977          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1978              calling_convention.GetFpuRegisterAt(0),
1979              calling_convention.GetFpuRegisterAt(1)));
1980          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1981          break;
1982        }
1983
1984        default:
1985          LOG(FATAL) << "Unexpected type conversion from " << input_type
1986                     << " to " << result_type;
1987      }
1988      break;
1989
1990    case Primitive::kPrimChar:
1991      switch (input_type) {
1992        case Primitive::kPrimBoolean:
1993          // Boolean input is a result of code transformations.
1994        case Primitive::kPrimByte:
1995        case Primitive::kPrimShort:
1996        case Primitive::kPrimInt:
1997          // Processing a Dex `int-to-char' instruction.
1998          locations->SetInAt(0, Location::RequiresRegister());
1999          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2000          break;
2001
2002        default:
2003          LOG(FATAL) << "Unexpected type conversion from " << input_type
2004                     << " to " << result_type;
2005      }
2006      break;
2007
2008    case Primitive::kPrimFloat:
2009      switch (input_type) {
2010        case Primitive::kPrimBoolean:
2011          // Boolean input is a result of code transformations.
2012        case Primitive::kPrimByte:
2013        case Primitive::kPrimShort:
2014        case Primitive::kPrimInt:
2015        case Primitive::kPrimChar:
2016          // Processing a Dex `int-to-float' instruction.
2017          locations->SetInAt(0, Location::RequiresRegister());
2018          locations->SetOut(Location::RequiresFpuRegister());
2019          break;
2020
2021        case Primitive::kPrimLong: {
2022          // Processing a Dex `long-to-float' instruction.
2023          InvokeRuntimeCallingConvention calling_convention;
2024          locations->SetInAt(0, Location::RegisterPairLocation(
2025              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2026          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2027          break;
2028        }
2029
2030        case Primitive::kPrimDouble:
2031          // Processing a Dex `double-to-float' instruction.
2032          locations->SetInAt(0, Location::RequiresFpuRegister());
2033          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2034          break;
2035
2036        default:
2037          LOG(FATAL) << "Unexpected type conversion from " << input_type
2038                     << " to " << result_type;
2039      }
2040      break;
2041
2042    case Primitive::kPrimDouble:
2043      switch (input_type) {
2044        case Primitive::kPrimBoolean:
2045          // Boolean input is a result of code transformations.
2046        case Primitive::kPrimByte:
2047        case Primitive::kPrimShort:
2048        case Primitive::kPrimInt:
2049        case Primitive::kPrimChar:
2050          // Processing a Dex `int-to-double' instruction.
2051          locations->SetInAt(0, Location::RequiresRegister());
2052          locations->SetOut(Location::RequiresFpuRegister());
2053          break;
2054
2055        case Primitive::kPrimLong:
2056          // Processing a Dex `long-to-double' instruction.
2057          locations->SetInAt(0, Location::RequiresRegister());
2058          locations->SetOut(Location::RequiresFpuRegister());
2059          locations->AddTemp(Location::RequiresFpuRegister());
2060          locations->AddTemp(Location::RequiresFpuRegister());
2061          break;
2062
2063        case Primitive::kPrimFloat:
2064          // Processing a Dex `float-to-double' instruction.
2065          locations->SetInAt(0, Location::RequiresFpuRegister());
2066          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2067          break;
2068
2069        default:
2070          LOG(FATAL) << "Unexpected type conversion from " << input_type
2071                     << " to " << result_type;
2072      }
2073      break;
2074
2075    default:
2076      LOG(FATAL) << "Unexpected type conversion from " << input_type
2077                 << " to " << result_type;
2078  }
2079}
2080
2081void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
2082  LocationSummary* locations = conversion->GetLocations();
2083  Location out = locations->Out();
2084  Location in = locations->InAt(0);
2085  Primitive::Type result_type = conversion->GetResultType();
2086  Primitive::Type input_type = conversion->GetInputType();
2087  DCHECK_NE(result_type, input_type);
2088  switch (result_type) {
2089    case Primitive::kPrimByte:
2090      switch (input_type) {
2091        case Primitive::kPrimBoolean:
2092          // Boolean input is a result of code transformations.
2093        case Primitive::kPrimShort:
2094        case Primitive::kPrimInt:
2095        case Primitive::kPrimChar:
2096          // Processing a Dex `int-to-byte' instruction.
2097          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
2098          break;
2099
2100        default:
2101          LOG(FATAL) << "Unexpected type conversion from " << input_type
2102                     << " to " << result_type;
2103      }
2104      break;
2105
2106    case Primitive::kPrimShort:
2107      switch (input_type) {
2108        case Primitive::kPrimBoolean:
2109          // Boolean input is a result of code transformations.
2110        case Primitive::kPrimByte:
2111        case Primitive::kPrimInt:
2112        case Primitive::kPrimChar:
2113          // Processing a Dex `int-to-short' instruction.
2114          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
2115          break;
2116
2117        default:
2118          LOG(FATAL) << "Unexpected type conversion from " << input_type
2119                     << " to " << result_type;
2120      }
2121      break;
2122
2123    case Primitive::kPrimInt:
2124      switch (input_type) {
2125        case Primitive::kPrimLong:
2126          // Processing a Dex `long-to-int' instruction.
2127          DCHECK(out.IsRegister());
2128          if (in.IsRegisterPair()) {
2129            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
2130          } else if (in.IsDoubleStackSlot()) {
2131            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
2132          } else {
2133            DCHECK(in.IsConstant());
2134            DCHECK(in.GetConstant()->IsLongConstant());
2135            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
2136            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
2137          }
2138          break;
2139
2140        case Primitive::kPrimFloat: {
2141          // Processing a Dex `float-to-int' instruction.
2142          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
2143          __ vmovs(temp, in.AsFpuRegister<SRegister>());
2144          __ vcvtis(temp, temp);
2145          __ vmovrs(out.AsRegister<Register>(), temp);
2146          break;
2147        }
2148
2149        case Primitive::kPrimDouble: {
2150          // Processing a Dex `double-to-int' instruction.
2151          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
2152          DRegister temp_d = FromLowSToD(temp_s);
2153          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
2154          __ vcvtid(temp_s, temp_d);
2155          __ vmovrs(out.AsRegister<Register>(), temp_s);
2156          break;
2157        }
2158
2159        default:
2160          LOG(FATAL) << "Unexpected type conversion from " << input_type
2161                     << " to " << result_type;
2162      }
2163      break;
2164
2165    case Primitive::kPrimLong:
2166      switch (input_type) {
2167        case Primitive::kPrimBoolean:
2168          // Boolean input is a result of code transformations.
2169        case Primitive::kPrimByte:
2170        case Primitive::kPrimShort:
2171        case Primitive::kPrimInt:
2172        case Primitive::kPrimChar:
2173          // Processing a Dex `int-to-long' instruction.
2174          DCHECK(out.IsRegisterPair());
2175          DCHECK(in.IsRegister());
2176          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
2177          // Sign extension.
2178          __ Asr(out.AsRegisterPairHigh<Register>(),
2179                 out.AsRegisterPairLow<Register>(),
2180                 31);
2181          break;
2182
2183        case Primitive::kPrimFloat:
2184          // Processing a Dex `float-to-long' instruction.
2185          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
2186                                  conversion,
2187                                  conversion->GetDexPc(),
2188                                  nullptr);
2189          break;
2190
2191        case Primitive::kPrimDouble:
2192          // Processing a Dex `double-to-long' instruction.
2193          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
2194                                  conversion,
2195                                  conversion->GetDexPc(),
2196                                  nullptr);
2197          break;
2198
2199        default:
2200          LOG(FATAL) << "Unexpected type conversion from " << input_type
2201                     << " to " << result_type;
2202      }
2203      break;
2204
2205    case Primitive::kPrimChar:
2206      switch (input_type) {
2207        case Primitive::kPrimBoolean:
2208          // Boolean input is a result of code transformations.
2209        case Primitive::kPrimByte:
2210        case Primitive::kPrimShort:
2211        case Primitive::kPrimInt:
2212          // Processing a Dex `int-to-char' instruction.
2213          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
2214          break;
2215
2216        default:
2217          LOG(FATAL) << "Unexpected type conversion from " << input_type
2218                     << " to " << result_type;
2219      }
2220      break;
2221
2222    case Primitive::kPrimFloat:
2223      switch (input_type) {
2224        case Primitive::kPrimBoolean:
2225          // Boolean input is a result of code transformations.
2226        case Primitive::kPrimByte:
2227        case Primitive::kPrimShort:
2228        case Primitive::kPrimInt:
2229        case Primitive::kPrimChar: {
2230          // Processing a Dex `int-to-float' instruction.
2231          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
2232          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
2233          break;
2234        }
2235
2236        case Primitive::kPrimLong:
2237          // Processing a Dex `long-to-float' instruction.
2238          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
2239                                  conversion,
2240                                  conversion->GetDexPc(),
2241                                  nullptr);
2242          break;
2243
2244        case Primitive::kPrimDouble:
2245          // Processing a Dex `double-to-float' instruction.
2246          __ vcvtsd(out.AsFpuRegister<SRegister>(),
2247                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
2248          break;
2249
2250        default:
2251          LOG(FATAL) << "Unexpected type conversion from " << input_type
2252                     << " to " << result_type;
2253      }
2254      break;
2255
2256    case Primitive::kPrimDouble:
2257      switch (input_type) {
2258        case Primitive::kPrimBoolean:
2259          // Boolean input is a result of code transformations.
2260        case Primitive::kPrimByte:
2261        case Primitive::kPrimShort:
2262        case Primitive::kPrimInt:
2263        case Primitive::kPrimChar: {
2264          // Processing a Dex `int-to-double' instruction.
2265          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
2266          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2267                    out.AsFpuRegisterPairLow<SRegister>());
2268          break;
2269        }
2270
2271        case Primitive::kPrimLong: {
2272          // Processing a Dex `long-to-double' instruction.
2273          Register low = in.AsRegisterPairLow<Register>();
2274          Register high = in.AsRegisterPairHigh<Register>();
2275          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
2276          DRegister out_d = FromLowSToD(out_s);
2277          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
2278          DRegister temp_d = FromLowSToD(temp_s);
2279          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
2280          DRegister constant_d = FromLowSToD(constant_s);
2281
2282          // temp_d = int-to-double(high)
2283          __ vmovsr(temp_s, high);
2284          __ vcvtdi(temp_d, temp_s);
2285          // constant_d = k2Pow32EncodingForDouble
2286          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
2287          // out_d = unsigned-to-double(low)
2288          __ vmovsr(out_s, low);
2289          __ vcvtdu(out_d, out_s);
2290          // out_d += temp_d * constant_d
2291          __ vmlad(out_d, temp_d, constant_d);
2292          break;
2293        }
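
        // Equivalently (illustrative arithmetic): the result is
        //   (double)(int32_t)high * 2^32 + (double)(uint32_t)low.
        // E.g. for in = -1 (high = 0xFFFFFFFF, low = 0xFFFFFFFF):
        //   (-1.0) * 4294967296.0 + 4294967295.0 = -1.0.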
2294
2295        case Primitive::kPrimFloat:
2296          // Processing a Dex `float-to-double' instruction.
2297          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2298                    in.AsFpuRegister<SRegister>());
2299          break;
2300
2301        default:
2302          LOG(FATAL) << "Unexpected type conversion from " << input_type
2303                     << " to " << result_type;
2304      }
2305      break;
2306
2307    default:
2308      LOG(FATAL) << "Unexpected type conversion from " << input_type
2309                 << " to " << result_type;
2310  }
2311}
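
// Narrowing conversions are bitfield extracts; for illustration:
//   int-to-byte: sbfx out, in, #0, #8   turns 0x000001FF into 0xFFFFFFFF (-1)
//   int-to-char: ubfx out, in, #0, #16  turns 0xFFFF1234 into 0x00001234
// sbfx sign-extends the extracted field and ubfx zero-extends it, matching
// the signedness of byte/short versus char in Java.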
2312
2313void LocationsBuilderARM::VisitAdd(HAdd* add) {
2314  LocationSummary* locations =
2315      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
2316  switch (add->GetResultType()) {
2317    case Primitive::kPrimInt: {
2318      locations->SetInAt(0, Location::RequiresRegister());
2319      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
2320      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2321      break;
2322    }
2323
2324    case Primitive::kPrimLong: {
2325      locations->SetInAt(0, Location::RequiresRegister());
2326      locations->SetInAt(1, Location::RequiresRegister());
2327      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2328      break;
2329    }
2330
2331    case Primitive::kPrimFloat:
2332    case Primitive::kPrimDouble: {
2333      locations->SetInAt(0, Location::RequiresFpuRegister());
2334      locations->SetInAt(1, Location::RequiresFpuRegister());
2335      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2336      break;
2337    }
2338
2339    default:
2340      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2341  }
2342}
2343
2344void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
2345  LocationSummary* locations = add->GetLocations();
2346  Location out = locations->Out();
2347  Location first = locations->InAt(0);
2348  Location second = locations->InAt(1);
2349  switch (add->GetResultType()) {
2350    case Primitive::kPrimInt:
2351      if (second.IsRegister()) {
2352        __ add(out.AsRegister<Register>(),
2353               first.AsRegister<Register>(),
2354               ShifterOperand(second.AsRegister<Register>()));
2355      } else {
2356        __ AddConstant(out.AsRegister<Register>(),
2357                       first.AsRegister<Register>(),
2358                       second.GetConstant()->AsIntConstant()->GetValue());
2359      }
2360      break;
2361
2362    case Primitive::kPrimLong: {
2363      DCHECK(second.IsRegisterPair());
2364      __ adds(out.AsRegisterPairLow<Register>(),
2365              first.AsRegisterPairLow<Register>(),
2366              ShifterOperand(second.AsRegisterPairLow<Register>()));
2367      __ adc(out.AsRegisterPairHigh<Register>(),
2368             first.AsRegisterPairHigh<Register>(),
2369             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2370      break;
2371    }
2372
2373    case Primitive::kPrimFloat:
2374      __ vadds(out.AsFpuRegister<SRegister>(),
2375               first.AsFpuRegister<SRegister>(),
2376               second.AsFpuRegister<SRegister>());
2377      break;
2378
2379    case Primitive::kPrimDouble:
2380      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2381               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2382               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2383      break;
2384
2385    default:
2386      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2387  }
2388}
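
// Worked example for the long case (values are illustrative): adding
// 0x00000000FFFFFFFF + 0x0000000000000001:
//   adds out.lo, ...   @ 0xFFFFFFFF + 1 = 0x00000000, carry out, C = 1
//   adc  out.hi, ...   @ 0 + 0 + C = 1
// giving 0x0000000100000000: the carry propagates into the high word.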
2389
2390void LocationsBuilderARM::VisitSub(HSub* sub) {
2391  LocationSummary* locations =
2392      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2393  switch (sub->GetResultType()) {
2394    case Primitive::kPrimInt: {
2395      locations->SetInAt(0, Location::RequiresRegister());
2396      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2397      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2398      break;
2399    }
2400
2401    case Primitive::kPrimLong: {
2402      locations->SetInAt(0, Location::RequiresRegister());
2403      locations->SetInAt(1, Location::RequiresRegister());
2404      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2405      break;
2406    }
2407    case Primitive::kPrimFloat:
2408    case Primitive::kPrimDouble: {
2409      locations->SetInAt(0, Location::RequiresFpuRegister());
2410      locations->SetInAt(1, Location::RequiresFpuRegister());
2411      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2412      break;
2413    }
2414    default:
2415      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2416  }
2417}
2418
2419void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2420  LocationSummary* locations = sub->GetLocations();
2421  Location out = locations->Out();
2422  Location first = locations->InAt(0);
2423  Location second = locations->InAt(1);
2424  switch (sub->GetResultType()) {
2425    case Primitive::kPrimInt: {
2426      if (second.IsRegister()) {
2427        __ sub(out.AsRegister<Register>(),
2428               first.AsRegister<Register>(),
2429               ShifterOperand(second.AsRegister<Register>()));
2430      } else {
2431        __ AddConstant(out.AsRegister<Register>(),
2432                       first.AsRegister<Register>(),
2433                       -second.GetConstant()->AsIntConstant()->GetValue());
2434      }
2435      break;
2436    }
2437
2438    case Primitive::kPrimLong: {
2439      DCHECK(second.IsRegisterPair());
2440      __ subs(out.AsRegisterPairLow<Register>(),
2441              first.AsRegisterPairLow<Register>(),
2442              ShifterOperand(second.AsRegisterPairLow<Register>()));
2443      __ sbc(out.AsRegisterPairHigh<Register>(),
2444             first.AsRegisterPairHigh<Register>(),
2445             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2446      break;
2447    }
2448
2449    case Primitive::kPrimFloat: {
2450      __ vsubs(out.AsFpuRegister<SRegister>(),
2451               first.AsFpuRegister<SRegister>(),
2452               second.AsFpuRegister<SRegister>());
2453      break;
2454    }
2455
2456    case Primitive::kPrimDouble: {
2457      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2458               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2459               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2460      break;
2461    }
2462
2464    default:
2465      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2466  }
2467}
2468
2469void LocationsBuilderARM::VisitMul(HMul* mul) {
2470  LocationSummary* locations =
2471      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2472  switch (mul->GetResultType()) {
2473    case Primitive::kPrimInt:
2474    case Primitive::kPrimLong:  {
2475      locations->SetInAt(0, Location::RequiresRegister());
2476      locations->SetInAt(1, Location::RequiresRegister());
2477      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2478      break;
2479    }
2480
2481    case Primitive::kPrimFloat:
2482    case Primitive::kPrimDouble: {
2483      locations->SetInAt(0, Location::RequiresFpuRegister());
2484      locations->SetInAt(1, Location::RequiresFpuRegister());
2485      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2486      break;
2487    }
2488
2489    default:
2490      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2491  }
2492}
2493
2494void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
2495  LocationSummary* locations = mul->GetLocations();
2496  Location out = locations->Out();
2497  Location first = locations->InAt(0);
2498  Location second = locations->InAt(1);
2499  switch (mul->GetResultType()) {
2500    case Primitive::kPrimInt: {
2501      __ mul(out.AsRegister<Register>(),
2502             first.AsRegister<Register>(),
2503             second.AsRegister<Register>());
2504      break;
2505    }
2506    case Primitive::kPrimLong: {
2507      Register out_hi = out.AsRegisterPairHigh<Register>();
2508      Register out_lo = out.AsRegisterPairLow<Register>();
2509      Register in1_hi = first.AsRegisterPairHigh<Register>();
2510      Register in1_lo = first.AsRegisterPairLow<Register>();
2511      Register in2_hi = second.AsRegisterPairHigh<Register>();
2512      Register in2_lo = second.AsRegisterPairLow<Register>();
2513
2514      // Extra checks are needed because of the existence of register pairs
2515      // such as R1_R2: the algorithm is wrong if out.hi is either in1.lo or
2516      // in2.lo (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2).
2517      DCHECK_NE(out_hi, in1_lo);
2518      DCHECK_NE(out_hi, in2_lo);
2519
2520      // input: in1 - 64 bits, in2 - 64 bits
2521      // output: out
2522      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
2523      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
2524      // parts: out.lo = (in1.lo * in2.lo)[31:0]
2525
2526      // IP <- in1.lo * in2.hi
2527      __ mul(IP, in1_lo, in2_hi);
2528      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
2529      __ mla(out_hi, in1_hi, in2_lo, IP);
2530      // out.lo <- (in1.lo * in2.lo)[31:0];
2531      __ umull(out_lo, IP, in1_lo, in2_lo);
2532      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
2533      __ add(out_hi, out_hi, ShifterOperand(IP));
2534      break;
2535    }
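
    // Sanity check of the decomposition (illustrative values): for
    // in1 = 2^32 + 2 (hi = 1, lo = 2) and in2 = 3 (hi = 0, lo = 3):
    //   IP     = 2 * 0 = 0
    //   out.hi = 1 * 3 + 0 = 3
    //   umull  sets out.lo = 6 and IP = 0 (the low words' 64-bit product)
    //   out.hi = 3 + 0 = 3
    // so out = 3 * 2^32 + 6 = (2^32 + 2) * 3 mod 2^64, as expected.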
2536
2537    case Primitive::kPrimFloat: {
2538      __ vmuls(out.AsFpuRegister<SRegister>(),
2539               first.AsFpuRegister<SRegister>(),
2540               second.AsFpuRegister<SRegister>());
2541      break;
2542    }
2543
2544    case Primitive::kPrimDouble: {
2545      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2546               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2547               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2548      break;
2549    }
2550
2551    default:
2552      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2553  }
2554}
2555
2556void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2557  DCHECK(instruction->IsDiv() || instruction->IsRem());
2558  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2559
2560  LocationSummary* locations = instruction->GetLocations();
2561  Location second = locations->InAt(1);
2562  DCHECK(second.IsConstant());
2563
2564  Register out = locations->Out().AsRegister<Register>();
2565  Register dividend = locations->InAt(0).AsRegister<Register>();
2566  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2567  DCHECK(imm == 1 || imm == -1);
2568
2569  if (instruction->IsRem()) {
2570    __ LoadImmediate(out, 0);
2571  } else {
2572    if (imm == 1) {
2573      __ Mov(out, dividend);
2574    } else {
2575      __ rsb(out, dividend, ShifterOperand(0));
2576    }
2577  }
2578}
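
// Rationale: x % 1 == x % -1 == 0 for all x, x / 1 == x, and x / -1 == -x,
// so no division is needed. The rsb-based negation also matches Java
// semantics for the overflow case, since rsb(0x80000000) == 0x80000000
// == Integer.MIN_VALUE / -1.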
2579
2580void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2581  DCHECK(instruction->IsDiv() || instruction->IsRem());
2582  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2583
2584  LocationSummary* locations = instruction->GetLocations();
2585  Location second = locations->InAt(1);
2586  DCHECK(second.IsConstant());
2587
2588  Register out = locations->Out().AsRegister<Register>();
2589  Register dividend = locations->InAt(0).AsRegister<Register>();
2590  Register temp = locations->GetTemp(0).AsRegister<Register>();
2591  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2592  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
2593  DCHECK(IsPowerOfTwo(abs_imm));
2594  int ctz_imm = CTZ(abs_imm);
2595
2596  if (ctz_imm == 1) {
2597    __ Lsr(temp, dividend, 32 - ctz_imm);
2598  } else {
2599    __ Asr(temp, dividend, 31);
2600    __ Lsr(temp, temp, 32 - ctz_imm);
2601  }
2602  __ add(out, temp, ShifterOperand(dividend));
2603
2604  if (instruction->IsDiv()) {
2605    __ Asr(out, out, ctz_imm);
2606    if (imm < 0) {
2607      __ rsb(out, out, ShifterOperand(0));
2608    }
2609  } else {
2610    __ ubfx(out, out, 0, ctz_imm);
2611    __ sub(out, out, ShifterOperand(temp));
2612  }
2613}
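
// Worked example (illustrative): dividend = -7, imm = 4 (so ctz_imm = 2):
//   Asr temp, dividend, #31   @ temp = 0xFFFFFFFF (replicated sign bit)
//   Lsr temp, temp, #30       @ temp = 3 = abs(imm) - 1, the rounding bias
//   add out, temp, dividend   @ out = -4
// div: Asr out, out, #2       @ -1, matching Java truncation for -7 / 4
// rem: ubfx out, out, #0, #2  @ 0; then 0 - temp = -3 == -7 % 4
// For division by +/-2 (ctz_imm == 1) the bias is just the sign bit, so a
// single Lsr by 31 replaces the Asr/Lsr pair; non-negative dividends always
// get a zero bias and are shifted unchanged.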
2614
2615void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
2616  DCHECK(instruction->IsDiv() || instruction->IsRem());
2617  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2618
2619  LocationSummary* locations = instruction->GetLocations();
2620  Location second = locations->InAt(1);
2621  DCHECK(second.IsConstant());
2622
2623  Register out = locations->Out().AsRegister<Register>();
2624  Register dividend = locations->InAt(0).AsRegister<Register>();
2625  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
2626  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
2627  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2628
2629  int64_t magic;
2630  int shift;
2631  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
2632
2633  __ LoadImmediate(temp1, magic);
2634  __ smull(temp2, temp1, dividend, temp1);
2635
2636  if (imm > 0 && magic < 0) {
2637    __ add(temp1, temp1, ShifterOperand(dividend));
2638  } else if (imm < 0 && magic > 0) {
2639    __ sub(temp1, temp1, ShifterOperand(dividend));
2640  }
2641
2642  if (shift != 0) {
2643    __ Asr(temp1, temp1, shift);
2644  }
2645
2646  if (instruction->IsDiv()) {
2647    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
2648  } else {
2649    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
2650    // TODO: Strength reduction for mls.
2651    __ LoadImmediate(temp2, imm);
2652    __ mls(out, temp1, temp2, dividend);
2653  }
2654}
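
// This is standard signed magic-number division (cf. Hacker's Delight,
// ch. 10): smull keeps the high 32 bits of dividend * magic, the add/sub
// corrects for the sign of the magic constant, and temp1 - (temp1 >> 31)
// adds 1 to negative quotients so the result rounds toward zero. As an
// assumed example (the exact constants come from
// CalculateMagicAndShiftForDivRem): dividing by 7 would use
// magic = 0x92492493 and shift = 2; that magic is negative while imm > 0,
// so the dividend is added back before the arithmetic shift.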
2655
2656void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2657  DCHECK(instruction->IsDiv() || instruction->IsRem());
2658  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2659
2660  LocationSummary* locations = instruction->GetLocations();
2661  Location second = locations->InAt(1);
2662  DCHECK(second.IsConstant());
2663
2664  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2665  if (imm == 0) {
2666    // Do not generate anything: a DivZeroCheck would prevent any code from being executed.
2667  } else if (imm == 1 || imm == -1) {
2668    DivRemOneOrMinusOne(instruction);
2669  } else if (IsPowerOfTwo(std::abs(imm))) {
2670    DivRemByPowerOfTwo(instruction);
2671  } else {
2672    DCHECK(imm <= -2 || imm >= 2);
2673    GenerateDivRemWithAnyConstant(instruction);
2674  }
2675}
2676
2677void LocationsBuilderARM::VisitDiv(HDiv* div) {
2678  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
2679  if (div->GetResultType() == Primitive::kPrimLong) {
2680    // pLdiv runtime call.
2681    call_kind = LocationSummary::kCall;
2682  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
2683    // sdiv will be replaced by another instruction sequence.
2684  } else if (div->GetResultType() == Primitive::kPrimInt &&
2685             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2686    // pIdivmod runtime call.
2687    call_kind = LocationSummary::kCall;
2688  }
2689
2690  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
2691
2692  switch (div->GetResultType()) {
2693    case Primitive::kPrimInt: {
2694      if (div->InputAt(1)->IsConstant()) {
2695        locations->SetInAt(0, Location::RequiresRegister());
2696        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2697        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2698        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
2699        if (abs_imm <= 1) {
2700          // No temp register required.
2701        } else {
2702          locations->AddTemp(Location::RequiresRegister());
2703          if (!IsPowerOfTwo(abs_imm)) {
2704            locations->AddTemp(Location::RequiresRegister());
2705          }
2706        }
2707      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2708        locations->SetInAt(0, Location::RequiresRegister());
2709        locations->SetInAt(1, Location::RequiresRegister());
2710        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2711      } else {
2712        InvokeRuntimeCallingConvention calling_convention;
2713        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2714        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2715        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2716        //       we only need the former.
2717        locations->SetOut(Location::RegisterLocation(R0));
2718      }
2719      break;
2720    }
2721    case Primitive::kPrimLong: {
2722      InvokeRuntimeCallingConvention calling_convention;
2723      locations->SetInAt(0, Location::RegisterPairLocation(
2724          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2725      locations->SetInAt(1, Location::RegisterPairLocation(
2726          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2727      locations->SetOut(Location::RegisterPairLocation(R0, R1));
2728      break;
2729    }
2730    case Primitive::kPrimFloat:
2731    case Primitive::kPrimDouble: {
2732      locations->SetInAt(0, Location::RequiresFpuRegister());
2733      locations->SetInAt(1, Location::RequiresFpuRegister());
2734      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2735      break;
2736    }
2737
2738    default:
2739      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2740  }
2741}
2742
2743void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
2744  LocationSummary* locations = div->GetLocations();
2745  Location out = locations->Out();
2746  Location first = locations->InAt(0);
2747  Location second = locations->InAt(1);
2748
2749  switch (div->GetResultType()) {
2750    case Primitive::kPrimInt: {
2751      if (second.IsConstant()) {
2752        GenerateDivRemConstantIntegral(div);
2753      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2754        __ sdiv(out.AsRegister<Register>(),
2755                first.AsRegister<Register>(),
2756                second.AsRegister<Register>());
2757      } else {
2758        InvokeRuntimeCallingConvention calling_convention;
2759        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2760        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2761        DCHECK_EQ(R0, out.AsRegister<Register>());
2762
2763        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
2764      }
2765      break;
2766    }
2767
2768    case Primitive::kPrimLong: {
2769      InvokeRuntimeCallingConvention calling_convention;
2770      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
2771      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
2772      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
2773      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
2774      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
2775      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());
2776
2777      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
2778      break;
2779    }
2780
2781    case Primitive::kPrimFloat: {
2782      __ vdivs(out.AsFpuRegister<SRegister>(),
2783               first.AsFpuRegister<SRegister>(),
2784               second.AsFpuRegister<SRegister>());
2785      break;
2786    }
2787
2788    case Primitive::kPrimDouble: {
2789      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2790               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2791               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2792      break;
2793    }
2794
2795    default:
2796      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2797  }
2798}
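
// Note on the runtime fallbacks: pIdivmod returns the quotient in R0 and
// the remainder in R1, while pLdiv returns the 64-bit quotient in R0:R1.
// The DCHECKs above only assert that the register allocator has already
// placed the operands according to the runtime calling convention, so no
// moves are needed before the call.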
2799
2800void LocationsBuilderARM::VisitRem(HRem* rem) {
2801  Primitive::Type type = rem->GetResultType();
2802
2803  // Most remainders are implemented in the runtime.
2804  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2805  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2806    // sdiv will be replaced by another instruction sequence (see GenerateDivRemConstantIntegral).
2807    call_kind = LocationSummary::kNoCall;
2808  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2809             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2810    // We have a hardware divide instruction for int: compute the remainder inline with a short sdiv/mls sequence.
2811    call_kind = LocationSummary::kNoCall;
2812  }
2813
2814  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2815
2816  switch (type) {
2817    case Primitive::kPrimInt: {
2818      if (rem->InputAt(1)->IsConstant()) {
2819        locations->SetInAt(0, Location::RequiresRegister());
2820        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2821        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2822        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
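        // Temp count rationale (illustrative): a power-of-two divisor is
        // handled with a shift-based sequence needing one temp; any other
        // divisor uses a multiply-based sequence needing a second temp.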
2823        if (abs_imm <= 1) {
2824          // No temp register required.
2825        } else {
2826          locations->AddTemp(Location::RequiresRegister());
2827          if (!IsPowerOfTwo(abs_imm)) {
2828            locations->AddTemp(Location::RequiresRegister());
2829          }
2830        }
2831      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2832        locations->SetInAt(0, Location::RequiresRegister());
2833        locations->SetInAt(1, Location::RequiresRegister());
2834        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2835        locations->AddTemp(Location::RequiresRegister());
2836      } else {
2837        InvokeRuntimeCallingConvention calling_convention;
2838        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2839        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2840        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2841        //       we only need the latter.
2842        locations->SetOut(Location::RegisterLocation(R1));
2843      }
2844      break;
2845    }
2846    case Primitive::kPrimLong: {
2847      InvokeRuntimeCallingConvention calling_convention;
2848      locations->SetInAt(0, Location::RegisterPairLocation(
2849          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2850      locations->SetInAt(1, Location::RegisterPairLocation(
2851          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2852      // The runtime helper puts the output in R2, R3.
2853      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2854      break;
2855    }
2856    case Primitive::kPrimFloat: {
2857      InvokeRuntimeCallingConvention calling_convention;
2858      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2859      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2860      locations->SetOut(Location::FpuRegisterLocation(S0));
2861      break;
2862    }
2863
2864    case Primitive::kPrimDouble: {
2865      InvokeRuntimeCallingConvention calling_convention;
2866      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2867          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2868      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2869          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2870      locations->SetOut(Location::FpuRegisterPairLocation(S0, S1));
2871      break;
2872    }
2873
2874    default:
2875      LOG(FATAL) << "Unexpected rem type " << type;
2876  }
2877}
2878
2879void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2880  LocationSummary* locations = rem->GetLocations();
2881  Location out = locations->Out();
2882  Location first = locations->InAt(0);
2883  Location second = locations->InAt(1);
2884
2885  Primitive::Type type = rem->GetResultType();
2886  switch (type) {
2887    case Primitive::kPrimInt: {
2888      if (second.IsConstant()) {
2889        GenerateDivRemConstantIntegral(rem);
2890      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2891        Register reg1 = first.AsRegister<Register>();
2892        Register reg2 = second.AsRegister<Register>();
2893        Register temp = locations->GetTemp(0).AsRegister<Register>();
2894
2895        // temp = reg1 / reg2  (integer division)
2896        // dest = reg1 - temp * reg2
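        // Worked example (illustrative): reg1 = -7, reg2 = 2 gives temp = -3
        // (sdiv truncates toward zero) and dest = -7 - (-3 * 2) = -1, matching
        // Java's remainder semantics (the result takes the sign of the dividend).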
2897        __ sdiv(temp, reg1, reg2);
2898        __ mls(out.AsRegister<Register>(), temp, reg2, reg1);
2899      } else {
2900        InvokeRuntimeCallingConvention calling_convention;
2901        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2902        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2903        DCHECK_EQ(R1, out.AsRegister<Register>());
2904
2905        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2906      }
2907      break;
2908    }
2909
2910    case Primitive::kPrimLong: {
2911      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2912      break;
2913    }
2914
2915    case Primitive::kPrimFloat: {
2916      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2917      break;
2918    }
2919
2920    case Primitive::kPrimDouble: {
2921      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2922      break;
2923    }
2924
2925    default:
2926      LOG(FATAL) << "Unexpected rem type " << type;
2927  }
2928}
2929
2930void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2931  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2932      ? LocationSummary::kCallOnSlowPath
2933      : LocationSummary::kNoCall;
2934  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2935  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2936  if (instruction->HasUses()) {
2937    locations->SetOut(Location::SameAsFirstInput());
2938  }
2939}
2940
2941void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2942  SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2943  codegen_->AddSlowPath(slow_path);
2944
2945  LocationSummary* locations = instruction->GetLocations();
2946  Location value = locations->InAt(0);
2947
2948  switch (instruction->GetType()) {
2949    case Primitive::kPrimByte:
2950    case Primitive::kPrimChar:
2951    case Primitive::kPrimShort:
2952    case Primitive::kPrimInt: {
2953      if (value.IsRegister()) {
2954        __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
2955      } else {
2956        DCHECK(value.IsConstant()) << value;
2957        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2958          __ b(slow_path->GetEntryLabel());
2959        }
2960      }
2961      break;
2962    }
2963    case Primitive::kPrimLong: {
2964      if (value.IsRegisterPair()) {
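        // A 64-bit value is zero iff (low | high) == 0; orrs computes the OR
        // into IP purely to set the Z flag for the branch below.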
2965        __ orrs(IP,
2966                value.AsRegisterPairLow<Register>(),
2967                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2968        __ b(slow_path->GetEntryLabel(), EQ);
2969      } else {
2970        DCHECK(value.IsConstant()) << value;
2971        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2972          __ b(slow_path->GetEntryLabel());
2973        }
2974      }
2975      break;
2976    }
2977    default:
2978      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2979  }
2980}
2981
2982void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2983  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2984
2985  LocationSummary* locations =
2986      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2987
2988  switch (op->GetResultType()) {
2989    case Primitive::kPrimInt: {
2990      locations->SetInAt(0, Location::RequiresRegister());
2991      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2992      // Make the output overlap, as it will be used to hold the masked
2993      // second input.
2994      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2995      break;
2996    }
2997    case Primitive::kPrimLong: {
2998      locations->SetInAt(0, Location::RequiresRegister());
2999      locations->SetInAt(1, Location::RequiresRegister());
3000      locations->AddTemp(Location::RequiresRegister());
3001      locations->SetOut(Location::RequiresRegister());
3002      break;
3003    }
3004    default:
3005      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
3006  }
3007}
3008
3009void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
3010  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
3011
3012  LocationSummary* locations = op->GetLocations();
3013  Location out = locations->Out();
3014  Location first = locations->InAt(0);
3015  Location second = locations->InAt(1);
3016
3017  Primitive::Type type = op->GetResultType();
3018  switch (type) {
3019    case Primitive::kPrimInt: {
3020      Register out_reg = out.AsRegister<Register>();
3021      Register first_reg = first.AsRegister<Register>();
3022      // ARM doesn't mask the shift count, so we need to do it ourselves.
3023      if (second.IsRegister()) {
3024        Register second_reg = second.AsRegister<Register>();
3025        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
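        // e.g. Java mandates (x << 33) == (x << 1); without this mask, ARM's
        // register-specified shift (which uses the low byte of the register,
        // i.e. 33) would instead produce 0.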
3026        if (op->IsShl()) {
3027          __ Lsl(out_reg, first_reg, out_reg);
3028        } else if (op->IsShr()) {
3029          __ Asr(out_reg, first_reg, out_reg);
3030        } else {
3031          __ Lsr(out_reg, first_reg, out_reg);
3032        }
3033      } else {
3034        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
3035        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
3036        if (shift_value == 0) {  // ARM does not support shifting with an immediate of 0.
3037          __ Mov(out_reg, first_reg);
3038        } else if (op->IsShl()) {
3039          __ Lsl(out_reg, first_reg, shift_value);
3040        } else if (op->IsShr()) {
3041          __ Asr(out_reg, first_reg, shift_value);
3042        } else {
3043          __ Lsr(out_reg, first_reg, shift_value);
3044        }
3045      }
3046      break;
3047    }
3048    case Primitive::kPrimLong: {
3049      Register o_h = out.AsRegisterPairHigh<Register>();
3050      Register o_l = out.AsRegisterPairLow<Register>();
3051
3052      Register temp = locations->GetTemp(0).AsRegister<Register>();
3053
3054      Register high = first.AsRegisterPairHigh<Register>();
3055      Register low = first.AsRegisterPairLow<Register>();
3056
3057      Register second_reg = second.AsRegister<Register>();
3058
3059      if (op->IsShl()) {
3060        __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
3061        // Shift the high part
3062        __ Lsl(o_h, high, o_l);
3063        // Shift the low part and OR the bits that spill over into the high part
3064        __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
3065        __ Lsr(temp, low, temp);
3066        __ orr(o_h, o_h, ShifterOperand(temp));
3067        // If the shift is >= 32 bits, override the high part
3068        __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
3069        __ it(PL);
3070        __ Lsl(o_h, low, temp, PL);
3071        // Shift the low part
3072        __ Lsl(o_l, low, o_l);
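        // Sanity sketch with an assumed shift of 40: subs gives 40 - 32 = 8 (PL),
        // so the predicated Lsl sets o_h = low << 8, and the final o_l = low << 40
        // degenerates to 0 since register-specified shifts of 32 or more yield 0.
        // That is exactly (high:low) << 40.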
3073      } else if (op->IsShr()) {
3074        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
3075        // Shift the low part
3076        __ Lsr(o_l, low, o_h);
3077        // Shift the high part and OR the bits that spill over into the low part
3078        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
3079        __ Lsl(temp, high, temp);
3080        __ orr(o_l, o_l, ShifterOperand(temp));
3081        // If the shift is >= 32 bits, override the low part
3082        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
3083        __ it(PL);
3084        __ Asr(o_l, high, temp, PL);
3085        // Shift the high part
3086        __ Asr(o_h, high, o_h);
3087      } else {
3088        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
3089        // Same as Shr, except we use `Lsr`s and not `Asr`s
3090        __ Lsr(o_l, low, o_h);
3091        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
3092        __ Lsl(temp, high, temp);
3093        __ orr(o_l, o_l, ShifterOperand(temp));
3094        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
3095        __ it(PL);
3096        __ Lsr(o_l, high, temp, PL);
3097        __ Lsr(o_h, high, o_h);
3098      }
3099      break;
3100    }
3101    default:
3102      LOG(FATAL) << "Unexpected operation type " << type;
3103  }
3104}
3105
3106void LocationsBuilderARM::VisitShl(HShl* shl) {
3107  HandleShift(shl);
3108}
3109
3110void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
3111  HandleShift(shl);
3112}
3113
3114void LocationsBuilderARM::VisitShr(HShr* shr) {
3115  HandleShift(shr);
3116}
3117
3118void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
3119  HandleShift(shr);
3120}
3121
3122void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
3123  HandleShift(ushr);
3124}
3125
3126void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
3127  HandleShift(ushr);
3128}
3129
3130void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
3131  LocationSummary* locations =
3132      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3133  InvokeRuntimeCallingConvention calling_convention;
3134  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3135  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3136  locations->SetOut(Location::RegisterLocation(R0));
3137}
3138
3139void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
3140  InvokeRuntimeCallingConvention calling_convention;
3141  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
3142  // Note: if heap poisoning is enabled, the entry point takes care
3143  // of poisoning the reference.
3144  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3145                          instruction,
3146                          instruction->GetDexPc(),
3147                          nullptr);
3148}
3149
3150void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
3151  LocationSummary* locations =
3152      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3153  InvokeRuntimeCallingConvention calling_convention;
3154  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3155  locations->SetOut(Location::RegisterLocation(R0));
3156  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3157  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3158}
3159
3160void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
3161  InvokeRuntimeCallingConvention calling_convention;
3162  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
3163  // Note: if heap poisoning is enabled, the entry point takes care
3164  // of poisoning the reference.
3165  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3166                          instruction,
3167                          instruction->GetDexPc(),
3168                          nullptr);
3169}
3170
3171void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
3172  LocationSummary* locations =
3173      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3174  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
3175  if (location.IsStackSlot()) {
3176    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3177  } else if (location.IsDoubleStackSlot()) {
3178    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3179  }
3180  locations->SetOut(location);
3181}
3182
3183void InstructionCodeGeneratorARM::VisitParameterValue(
3184    HParameterValue* instruction ATTRIBUTE_UNUSED) {
3185  // Nothing to do, the parameter is already at its location.
3186}
3187
3188void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
3189  LocationSummary* locations =
3190      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3191  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
3192}
3193
3194void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
3195  // Nothing to do, the method is already at its location.
3196}
3197
3198void LocationsBuilderARM::VisitNot(HNot* not_) {
3199  LocationSummary* locations =
3200      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
3201  locations->SetInAt(0, Location::RequiresRegister());
3202  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3203}
3204
3205void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
3206  LocationSummary* locations = not_->GetLocations();
3207  Location out = locations->Out();
3208  Location in = locations->InAt(0);
3209  switch (not_->GetResultType()) {
3210    case Primitive::kPrimInt:
3211      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
3212      break;
3213
3214    case Primitive::kPrimLong:
3215      __ mvn(out.AsRegisterPairLow<Register>(),
3216             ShifterOperand(in.AsRegisterPairLow<Register>()));
3217      __ mvn(out.AsRegisterPairHigh<Register>(),
3218             ShifterOperand(in.AsRegisterPairHigh<Register>()));
3219      break;
3220
3221    default:
3222      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
3223  }
3224}
3225
3226void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
3227  LocationSummary* locations =
3228      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
3229  locations->SetInAt(0, Location::RequiresRegister());
3230  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3231}
3232
3233void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
3234  LocationSummary* locations = bool_not->GetLocations();
3235  Location out = locations->Out();
3236  Location in = locations->InAt(0);
3237  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
3238}
3239
3240void LocationsBuilderARM::VisitCompare(HCompare* compare) {
3241  LocationSummary* locations =
3242      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
3243  switch (compare->InputAt(0)->GetType()) {
3244    case Primitive::kPrimLong: {
3245      locations->SetInAt(0, Location::RequiresRegister());
3246      locations->SetInAt(1, Location::RequiresRegister());
3247      // Output overlaps because it is written before doing the low comparison.
3248      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3249      break;
3250    }
3251    case Primitive::kPrimFloat:
3252    case Primitive::kPrimDouble: {
3253      locations->SetInAt(0, Location::RequiresFpuRegister());
3254      locations->SetInAt(1, Location::RequiresFpuRegister());
3255      locations->SetOut(Location::RequiresRegister());
3256      break;
3257    }
3258    default:
3259      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
3260  }
3261}
3262
3263void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
3264  LocationSummary* locations = compare->GetLocations();
3265  Register out = locations->Out().AsRegister<Register>();
3266  Location left = locations->InAt(0);
3267  Location right = locations->InAt(1);
3268
3269  Label less, greater, done;
3270  Primitive::Type type = compare->InputAt(0)->GetType();
3271  switch (type) {
3272    case Primitive::kPrimLong: {
3273      __ cmp(left.AsRegisterPairHigh<Register>(),
3274             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
3275      __ b(&less, LT);
3276      __ b(&greater, GT);
3277      // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
3278      __ LoadImmediate(out, 0);
3279      __ cmp(left.AsRegisterPairLow<Register>(),
3280             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
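      // Rationale: the high words decide the ordering with a signed compare;
      // only when they are equal do the low words matter, and those compare
      // as unsigned (the LO branch below covers that case).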
3281      break;
3282    }
3283    case Primitive::kPrimFloat:
3284    case Primitive::kPrimDouble: {
3285      __ LoadImmediate(out, 0);
3286      if (type == Primitive::kPrimFloat) {
3287        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
3288      } else {
3289        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
3290                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
3291      }
3292      __ vmstat();  // Transfer the FP status flags to the ARM APSR.
3293      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
3294      break;
3295    }
3296    default:
3297      LOG(FATAL) << "Unexpected compare type " << type;
3298  }
3299  __ b(&done, EQ);
3300  __ b(&less, LO);  // LO is for both: unsigned compare for longs and 'less than' for floats.
3301
3302  __ Bind(&greater);
3303  __ LoadImmediate(out, 1);
3304  __ b(&done);
3305
3306  __ Bind(&less);
3307  __ LoadImmediate(out, -1);
3308
3309  __ Bind(&done);
3310}
3311
3312void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
3313  LocationSummary* locations =
3314      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3315  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
3316    locations->SetInAt(i, Location::Any());
3317  }
3318  locations->SetOut(Location::Any());
3319}
3320
3321void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
3322  LOG(FATAL) << "Unreachable";
3323}
3324
3325void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
3326  // TODO (ported from quick): revisit ARM barrier kinds.
3327  DmbOptions flavor = DmbOptions::ISH;  // Quiet C++ warnings.
3328  switch (kind) {
3329    case MemBarrierKind::kAnyStore:
3330    case MemBarrierKind::kLoadAny:
3331    case MemBarrierKind::kAnyAny: {
3332      flavor = DmbOptions::ISH;
3333      break;
3334    }
3335    case MemBarrierKind::kStoreStore: {
3336      flavor = DmbOptions::ISHST;
3337      break;
3338    }
3339    default:
3340      LOG(FATAL) << "Unexpected memory barrier " << kind;
3341  }
3342  __ dmb(flavor);
3343}
3344
3345void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
3346                                                         uint32_t offset,
3347                                                         Register out_lo,
3348                                                         Register out_hi) {
3349  if (offset != 0) {
3350    __ LoadImmediate(out_lo, offset);
3351    __ add(IP, addr, ShifterOperand(out_lo));
3352    addr = IP;
3353  }
3354  __ ldrexd(out_lo, out_hi, addr);
3355}
3356
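// Schematically (illustrative, Thumb2), the store below expands to:
//   fail:
//     ldrexd temp1, temp2, [addr]               @ gain exclusive access
//     strexd temp1, value_lo, value_hi, [addr]  @ temp1 == 0 on success
//     cbnz   temp1, fail                        @ retry if exclusivity was lost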
3357void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
3358                                                          uint32_t offset,
3359                                                          Register value_lo,
3360                                                          Register value_hi,
3361                                                          Register temp1,
3362                                                          Register temp2,
3363                                                          HInstruction* instruction) {
3364  Label fail;
3365  if (offset != 0) {
3366    __ LoadImmediate(temp1, offset);
3367    __ add(IP, addr, ShifterOperand(temp1));
3368    addr = IP;
3369  }
3370  __ Bind(&fail);
3371  // We need a load followed by a store. (The address used in a STREX instruction must
3372  // be the same as the address in the most recently executed LDREX instruction.)
3373  __ ldrexd(temp1, temp2, addr);
3374  codegen_->MaybeRecordImplicitNullCheck(instruction);
3375  __ strexd(temp1, value_lo, value_hi, addr);
3376  __ CompareAndBranchIfNonZero(temp1, &fail);
3377}
3378
3379void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
3380  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
3381
3382  LocationSummary* locations =
3383      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3384  locations->SetInAt(0, Location::RequiresRegister());
3385
3386  Primitive::Type field_type = field_info.GetFieldType();
3387  if (Primitive::IsFloatingPointType(field_type)) {
3388    locations->SetInAt(1, Location::RequiresFpuRegister());
3389  } else {
3390    locations->SetInAt(1, Location::RequiresRegister());
3391  }
3392
3393  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
3394  bool generate_volatile = field_info.IsVolatile()
3395      && is_wide
3396      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3397  bool needs_write_barrier =
3398      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
3399  // Temporary registers for the write barrier.
3400  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
3401  if (needs_write_barrier) {
3402    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
3403    locations->AddTemp(Location::RequiresRegister());
3404  } else if (generate_volatile) {
3405    // ARM encodings have some additional constraints for ldrexd/strexd:
3406    // - the registers need to be consecutive;
3407    // - the first register must be even-numbered and must not be R14.
3408    // We don't test for ARM yet, and the assertion makes sure that we revisit this
3409    // if we ever enable the ARM encoding.
3410    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3411
3412    locations->AddTemp(Location::RequiresRegister());
3413    locations->AddTemp(Location::RequiresRegister());
3414    if (field_type == Primitive::kPrimDouble) {
3415      // For doubles we need two more registers to copy the value.
3416      locations->AddTemp(Location::RegisterLocation(R2));
3417      locations->AddTemp(Location::RegisterLocation(R3));
3418    }
3419  }
3420}
3421
3422void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
3423                                                 const FieldInfo& field_info,
3424                                                 bool value_can_be_null) {
3425  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
3426
3427  LocationSummary* locations = instruction->GetLocations();
3428  Register base = locations->InAt(0).AsRegister<Register>();
3429  Location value = locations->InAt(1);
3430
3431  bool is_volatile = field_info.IsVolatile();
3432  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3433  Primitive::Type field_type = field_info.GetFieldType();
3434  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
3435  bool needs_write_barrier =
3436      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
3437
3438  if (is_volatile) {
3439    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
3440  }
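  // Volatile stores are thus bracketed by barriers: a kAnyStore barrier above
  // and a kAnyAny barrier emitted at the end of this method, i.e.
  // dmb ish; store; dmb ish with the mappings in GenerateMemoryBarrier.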
3441
3442  switch (field_type) {
3443    case Primitive::kPrimBoolean:
3444    case Primitive::kPrimByte: {
3445      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
3446      break;
3447    }
3448
3449    case Primitive::kPrimShort:
3450    case Primitive::kPrimChar: {
3451      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
3452      break;
3453    }
3454
3455    case Primitive::kPrimInt:
3456    case Primitive::kPrimNot: {
3457      if (kPoisonHeapReferences && needs_write_barrier) {
3458        // Note that in the case where `value` is a null reference,
3459        // we do not enter this block, as a null reference does not
3460        // need poisoning.
3461        DCHECK_EQ(field_type, Primitive::kPrimNot);
3462        Register temp = locations->GetTemp(0).AsRegister<Register>();
3463        __ Mov(temp, value.AsRegister<Register>());
3464        __ PoisonHeapReference(temp);
3465        __ StoreToOffset(kStoreWord, temp, base, offset);
3466      } else {
3467        __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
3468      }
3469      break;
3470    }
3471
3472    case Primitive::kPrimLong: {
3473      if (is_volatile && !atomic_ldrd_strd) {
3474        GenerateWideAtomicStore(base, offset,
3475                                value.AsRegisterPairLow<Register>(),
3476                                value.AsRegisterPairHigh<Register>(),
3477                                locations->GetTemp(0).AsRegister<Register>(),
3478                                locations->GetTemp(1).AsRegister<Register>(),
3479                                instruction);
3480      } else {
3481        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
3482        codegen_->MaybeRecordImplicitNullCheck(instruction);
3483      }
3484      break;
3485    }
3486
3487    case Primitive::kPrimFloat: {
3488      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
3489      break;
3490    }
3491
3492    case Primitive::kPrimDouble: {
3493      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
3494      if (is_volatile && !atomic_ldrd_strd) {
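        // strexd can only source core registers, so split the D register into
        // two core temps with vmovrrd before the atomic store.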
3495        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
3496        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();
3497
3498        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);
3499
3500        GenerateWideAtomicStore(base, offset,
3501                                value_reg_lo,
3502                                value_reg_hi,
3503                                locations->GetTemp(2).AsRegister<Register>(),
3504                                locations->GetTemp(3).AsRegister<Register>(),
3505                                instruction);
3506      } else {
3507        __ StoreDToOffset(value_reg, base, offset);
3508        codegen_->MaybeRecordImplicitNullCheck(instruction);
3509      }
3510      break;
3511    }
3512
3513    case Primitive::kPrimVoid:
3514      LOG(FATAL) << "Unreachable type " << field_type;
3515      UNREACHABLE();
3516  }
3517
3518  // For longs and doubles, the implicit null check has already been recorded in the switch above.
3519  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
3520    codegen_->MaybeRecordImplicitNullCheck(instruction);
3521  }
3522
3523  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
3524    Register temp = locations->GetTemp(0).AsRegister<Register>();
3525    Register card = locations->GetTemp(1).AsRegister<Register>();
3526    codegen_->MarkGCCard(
3527        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
3528  }
3529
3530  if (is_volatile) {
3531    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
3532  }
3533}
3534
3535void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
3536  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3537  LocationSummary* locations =
3538      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3539  locations->SetInAt(0, Location::RequiresRegister());
3540
3541  bool volatile_for_double = field_info.IsVolatile()
3542      && (field_info.GetFieldType() == Primitive::kPrimDouble)
3543      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3544  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);
3545
3546  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3547    locations->SetOut(Location::RequiresFpuRegister());
3548  } else {
3549    locations->SetOut(Location::RequiresRegister(),
3550                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
3551  }
3552  if (volatile_for_double) {
3553    // ARM encodings have some additional constraints for ldrexd/strexd:
3554    // - the registers need to be consecutive;
3555    // - the first register must be even-numbered and must not be R14.
3556    // We don't test for ARM yet, and the assertion makes sure that we revisit this
3557    // if we ever enable the ARM encoding.
3558    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3559    locations->AddTemp(Location::RequiresRegister());
3560    locations->AddTemp(Location::RequiresRegister());
3561  }
3562}
3563
3564Location LocationsBuilderARM::ArmEncodableConstantOrRegister(HInstruction* constant,
3565                                                             Opcode opcode) {
3566  DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
3567  if (constant->IsConstant() &&
3568      CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
3569    return Location::ConstantLocation(constant->AsConstant());
3570  }
3571  return Location::RequiresRegister();
3572}
3573
3574bool LocationsBuilderARM::CanEncodeConstantAsImmediate(HConstant* input_cst,
3575                                                       Opcode opcode) {
3576  uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
3577  if (Primitive::Is64BitType(input_cst->GetType())) {
3578    return CanEncodeConstantAsImmediate(Low32Bits(value), opcode) &&
3579        CanEncodeConstantAsImmediate(High32Bits(value), opcode);
3580  } else {
3581    return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
3582  }
3583}
3584
3585bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode) {
3586  ShifterOperand so;
3587  ArmAssembler* assembler = codegen_->GetAssembler();
3588  if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, &so)) {
3589    return true;
3590  }
3591  Opcode neg_opcode = kNoOperand;
3592  switch (opcode) {
3593    case AND:
3594      neg_opcode = BIC;
3595      break;
3596    case ORR:
3597      neg_opcode = ORN;
3598      break;
3599    default:
3600      return false;
3601  }
3602  return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, ~value, &so);
3603}
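// Example (illustrative): 0xFFFFFF00 is not a valid ARM modified immediate,
// but its complement 0xFF is, so `AND rd, rn, #0xFFFFFF00` is representable
// as `BIC rd, rn, #0xFF` and the AND is reported as encodable above.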
3604
3605void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
3606                                                 const FieldInfo& field_info) {
3607  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
3608
3609  LocationSummary* locations = instruction->GetLocations();
3610  Register base = locations->InAt(0).AsRegister<Register>();
3611  Location out = locations->Out();
3612  bool is_volatile = field_info.IsVolatile();
3613  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3614  Primitive::Type field_type = field_info.GetFieldType();
3615  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
3616
3617  switch (field_type) {
3618    case Primitive::kPrimBoolean: {
3619      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
3620      break;
3621    }
3622
3623    case Primitive::kPrimByte: {
3624      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
3625      break;
3626    }
3627
3628    case Primitive::kPrimShort: {
3629      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
3630      break;
3631    }
3632
3633    case Primitive::kPrimChar: {
3634      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
3635      break;
3636    }
3637
3638    case Primitive::kPrimInt:
3639    case Primitive::kPrimNot: {
3640      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
3641      break;
3642    }
3643
3644    case Primitive::kPrimLong: {
3645      if (is_volatile && !atomic_ldrd_strd) {
3646        GenerateWideAtomicLoad(base, offset,
3647                               out.AsRegisterPairLow<Register>(),
3648                               out.AsRegisterPairHigh<Register>());
3649      } else {
3650        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
3651      }
3652      break;
3653    }
3654
3655    case Primitive::kPrimFloat: {
3656      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
3657      break;
3658    }
3659
3660    case Primitive::kPrimDouble: {
3661      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
3662      if (is_volatile && !atomic_ldrd_strd) {
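        // ldrexd can only target core registers, so load atomically into two
        // core temps and assemble the D register with vmovdrr afterwards.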
3663        Register lo = locations->GetTemp(0).AsRegister<Register>();
3664        Register hi = locations->GetTemp(1).AsRegister<Register>();
3665        GenerateWideAtomicLoad(base, offset, lo, hi);
3666        codegen_->MaybeRecordImplicitNullCheck(instruction);
3667        __ vmovdrr(out_reg, lo, hi);
3668      } else {
3669        __ LoadDFromOffset(out_reg, base, offset);
3670        codegen_->MaybeRecordImplicitNullCheck(instruction);
3671      }
3672      break;
3673    }
3674
3675    case Primitive::kPrimVoid:
3676      LOG(FATAL) << "Unreachable type " << field_type;
3677      UNREACHABLE();
3678  }
3679
3680  // For doubles, the implicit null check has already been recorded in the switch above.
3681  if (field_type != Primitive::kPrimDouble) {
3682    codegen_->MaybeRecordImplicitNullCheck(instruction);
3683  }
3684
3685  if (is_volatile) {
3686    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
3687  }
3688
3689  if (field_type == Primitive::kPrimNot) {
3690    __ MaybeUnpoisonHeapReference(out.AsRegister<Register>());
3691  }
3692}
3693
3694void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3695  HandleFieldSet(instruction, instruction->GetFieldInfo());
3696}
3697
3698void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3699  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3700}
3701
3702void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3703  HandleFieldGet(instruction, instruction->GetFieldInfo());
3704}
3705
3706void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3707  HandleFieldGet(instruction, instruction->GetFieldInfo());
3708}
3709
3710void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3711  HandleFieldGet(instruction, instruction->GetFieldInfo());
3712}
3713
3714void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3715  HandleFieldGet(instruction, instruction->GetFieldInfo());
3716}
3717
3718void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3719  HandleFieldSet(instruction, instruction->GetFieldInfo());
3720}
3721
3722void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3723  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3724}
3725
3726void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
3727    HUnresolvedInstanceFieldGet* instruction) {
3728  FieldAccessCallingConventionARM calling_convention;
3729  codegen_->CreateUnresolvedFieldLocationSummary(
3730      instruction, instruction->GetFieldType(), calling_convention);
3731}
3732
3733void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
3734    HUnresolvedInstanceFieldGet* instruction) {
3735  FieldAccessCallingConventionARM calling_convention;
3736  codegen_->GenerateUnresolvedFieldAccess(instruction,
3737                                          instruction->GetFieldType(),
3738                                          instruction->GetFieldIndex(),
3739                                          instruction->GetDexPc(),
3740                                          calling_convention);
3741}
3742
3743void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
3744    HUnresolvedInstanceFieldSet* instruction) {
3745  FieldAccessCallingConventionARM calling_convention;
3746  codegen_->CreateUnresolvedFieldLocationSummary(
3747      instruction, instruction->GetFieldType(), calling_convention);
3748}
3749
3750void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
3751    HUnresolvedInstanceFieldSet* instruction) {
3752  FieldAccessCallingConventionARM calling_convention;
3753  codegen_->GenerateUnresolvedFieldAccess(instruction,
3754                                          instruction->GetFieldType(),
3755                                          instruction->GetFieldIndex(),
3756                                          instruction->GetDexPc(),
3757                                          calling_convention);
3758}
3759
3760void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
3761    HUnresolvedStaticFieldGet* instruction) {
3762  FieldAccessCallingConventionARM calling_convention;
3763  codegen_->CreateUnresolvedFieldLocationSummary(
3764      instruction, instruction->GetFieldType(), calling_convention);
3765}
3766
3767void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
3768    HUnresolvedStaticFieldGet* instruction) {
3769  FieldAccessCallingConventionARM calling_convention;
3770  codegen_->GenerateUnresolvedFieldAccess(instruction,
3771                                          instruction->GetFieldType(),
3772                                          instruction->GetFieldIndex(),
3773                                          instruction->GetDexPc(),
3774                                          calling_convention);
3775}
3776
3777void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
3778    HUnresolvedStaticFieldSet* instruction) {
3779  FieldAccessCallingConventionARM calling_convention;
3780  codegen_->CreateUnresolvedFieldLocationSummary(
3781      instruction, instruction->GetFieldType(), calling_convention);
3782}
3783
3784void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
3785    HUnresolvedStaticFieldSet* instruction) {
3786  FieldAccessCallingConventionARM calling_convention;
3787  codegen_->GenerateUnresolvedFieldAccess(instruction,
3788                                          instruction->GetFieldType(),
3789                                          instruction->GetFieldIndex(),
3790                                          instruction->GetDexPc(),
3791                                          calling_convention);
3792}
3793
3794void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
3795  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
3796      ? LocationSummary::kCallOnSlowPath
3797      : LocationSummary::kNoCall;
3798  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3799  locations->SetInAt(0, Location::RequiresRegister());
3800  if (instruction->HasUses()) {
3801    locations->SetOut(Location::SameAsFirstInput());
3802  }
3803}
3804
3805void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
3806  if (codegen_->CanMoveNullCheckToUser(instruction)) {
3807    return;
3808  }
3809  Location obj = instruction->GetLocations()->InAt(0);
3810
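  // The load from [obj, #0] faults if obj is null; the SIGSEGV handler uses
  // the PC recorded below to turn the fault into a NullPointerException.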
3811  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
3812  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
3813}
3814
3815void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3816  SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3817  codegen_->AddSlowPath(slow_path);
3818
3819  LocationSummary* locations = instruction->GetLocations();
3820  Location obj = locations->InAt(0);
3821
3822  __ CompareAndBranchIfZero(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
3823}
3824
3825void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3826  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
3827    GenerateImplicitNullCheck(instruction);
3828  } else {
3829    GenerateExplicitNullCheck(instruction);
3830  }
3831}
3832
3833void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3834  LocationSummary* locations =
3835      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3836  locations->SetInAt(0, Location::RequiresRegister());
3837  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3838  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3839    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3840  } else {
3841    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3842  }
3843}
3844
3845void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
3846  LocationSummary* locations = instruction->GetLocations();
3847  Register obj = locations->InAt(0).AsRegister<Register>();
3848  Location index = locations->InAt(1);
3849  Primitive::Type type = instruction->GetType();
3850
3851  switch (type) {
3852    case Primitive::kPrimBoolean: {
3853      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
3854      Register out = locations->Out().AsRegister<Register>();
3855      if (index.IsConstant()) {
3856        size_t offset =
3857            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
3858        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
3859      } else {
3860        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
3861        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
3862      }
3863      break;
3864    }
3865
3866    case Primitive::kPrimByte: {
3867      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
3868      Register out = locations->Out().AsRegister<Register>();
3869      if (index.IsConstant()) {
3870        size_t offset =
3871            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
3872        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
3873      } else {
3874        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
3875        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
3876      }
3877      break;
3878    }
3879
3880    case Primitive::kPrimShort: {
3881      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
3882      Register out = locations->Out().AsRegister<Register>();
3883      if (index.IsConstant()) {
3884        size_t offset =
3885            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
3886        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
3887      } else {
3888        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
3889        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
3890      }
3891      break;
3892    }
3893
3894    case Primitive::kPrimChar: {
3895      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
3896      Register out = locations->Out().AsRegister<Register>();
3897      if (index.IsConstant()) {
3898        size_t offset =
3899            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
3900        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
3901      } else {
3902        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
3903        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
3904      }
3905      break;
3906    }
3907
3908    case Primitive::kPrimInt:
3909    case Primitive::kPrimNot: {
3910      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
3911                    "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
3912      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
3913      Register out = locations->Out().AsRegister<Register>();
3914      if (index.IsConstant()) {
3915        size_t offset =
3916            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
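        // e.g. (assuming the 12-byte 32-bit array header, so data_offset == 12):
        // a constant index 3 folds to offset = (3 << 2) + 12 = 24.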
3917        __ LoadFromOffset(kLoadWord, out, obj, offset);
3918      } else {
3919        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
3920        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
3921      }
3922      break;
3923    }
3924
3925    case Primitive::kPrimLong: {
3926      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
3927      Location out = locations->Out();
3928      if (index.IsConstant()) {
3929        size_t offset =
3930            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
3931        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
3932      } else {
3933        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
3934        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
3935      }
3936      break;
3937    }
3938
3939    case Primitive::kPrimFloat: {
3940      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
3941      Location out = locations->Out();
3942      DCHECK(out.IsFpuRegister());
3943      if (index.IsConstant()) {
3944        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
3945        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
3946      } else {
3947        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
3948        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
3949      }
3950      break;
3951    }
3952
3953    case Primitive::kPrimDouble: {
3954      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
3955      Location out = locations->Out();
3956      DCHECK(out.IsFpuRegisterPair());
3957      if (index.IsConstant()) {
3958        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
3959        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
3960      } else {
3961        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
3962        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
3963      }
3964      break;
3965    }
3966
3967    case Primitive::kPrimVoid:
3968      LOG(FATAL) << "Unreachable type " << type;
3969      UNREACHABLE();
3970  }
3971  codegen_->MaybeRecordImplicitNullCheck(instruction);
3972
3973  if (type == Primitive::kPrimNot) {
3974    Register out = locations->Out().AsRegister<Register>();
3975    __ MaybeUnpoisonHeapReference(out);
3976  }
3977}
3978
3979void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3980  Primitive::Type value_type = instruction->GetComponentType();
3981
3982  bool needs_write_barrier =
3983      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3984  bool may_need_runtime_call = instruction->NeedsTypeCheck();
3985
3986  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3987      instruction,
3988      may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
3989  locations->SetInAt(0, Location::RequiresRegister());
3990  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3991  if (Primitive::IsFloatingPointType(value_type)) {
3992    locations->SetInAt(2, Location::RequiresFpuRegister());
3993  } else {
3994    locations->SetInAt(2, Location::RequiresRegister());
3995  }
3996
3997  if (needs_write_barrier) {
3998    // Temporary registers for the write barrier.
3999    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
4000    locations->AddTemp(Location::RequiresRegister());
4001  }
4002}
4003
4004void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
4005  LocationSummary* locations = instruction->GetLocations();
4006  Register array = locations->InAt(0).AsRegister<Register>();
4007  Location index = locations->InAt(1);
4008  Primitive::Type value_type = instruction->GetComponentType();
4009  bool may_need_runtime_call = locations->CanCall();
4010  bool needs_write_barrier =
4011      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
4012
4013  switch (value_type) {
4014    case Primitive::kPrimBoolean:
4015    case Primitive::kPrimByte: {
4016      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
4017      Register value = locations->InAt(2).AsRegister<Register>();
4018      if (index.IsConstant()) {
4019        size_t offset =
4020            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
4021        __ StoreToOffset(kStoreByte, value, array, offset);
4022      } else {
4023        __ add(IP, array, ShifterOperand(index.AsRegister<Register>()));
4024        __ StoreToOffset(kStoreByte, value, IP, data_offset);
4025      }
4026      break;
4027    }
4028
4029    case Primitive::kPrimShort:
4030    case Primitive::kPrimChar: {
4031      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
4032      Register value = locations->InAt(2).AsRegister<Register>();
4033      if (index.IsConstant()) {
4034        size_t offset =
4035            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
4036        __ StoreToOffset(kStoreHalfword, value, array, offset);
4037      } else {
4038        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
4039        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
4040      }
4041      break;
4042    }
4043
4044    case Primitive::kPrimNot: {
4045      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
4046      Register value = locations->InAt(2).AsRegister<Register>();
4047      Register source = value;
4048
4049      if (instruction->InputAt(2)->IsNullConstant()) {
4050        // Just setting null.
4051        if (index.IsConstant()) {
4052          size_t offset =
4053              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
4054          __ StoreToOffset(kStoreWord, source, array, offset);
4055        } else {
4056          DCHECK(index.IsRegister()) << index;
4057          __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
4058          __ StoreToOffset(kStoreWord, source, IP, data_offset);
4059        }
4060        break;
4061      }
4062
4063      DCHECK(needs_write_barrier);
4064      Register temp1 = locations->GetTemp(0).AsRegister<Register>();
4065      Register temp2 = locations->GetTemp(1).AsRegister<Register>();
4066      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
4067      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
4068      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
4069      Label done;
4070      SlowPathCode* slow_path = nullptr;
4071
4072      if (may_need_runtime_call) {
4073        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM(instruction);
4074        codegen_->AddSlowPath(slow_path);
4075        if (instruction->GetValueCanBeNull()) {
4076          Label non_zero;
4077          __ CompareAndBranchIfNonZero(value, &non_zero);
4078          if (index.IsConstant()) {
4079            size_t offset =
4080                (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
4081            __ StoreToOffset(kStoreWord, value, array, offset);
4082          } else {
4083            DCHECK(index.IsRegister()) << index;
4084            __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
4085            __ StoreToOffset(kStoreWord, value, IP, data_offset);
4086          }
4087          codegen_->MaybeRecordImplicitNullCheck(instruction);
4088          __ b(&done);
4089          __ Bind(&non_zero);
4090        }
4091
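        // Descriptive sketch of the check below: load the array's component type
        // and the value's class and compare them; for a statically-typed Object[],
        // also accept a mismatch when the component type's super class is null
        // (only java.lang.Object has no super class). Everything else goes to the
        // ArraySet slow path.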
4092        __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
4093        codegen_->MaybeRecordImplicitNullCheck(instruction);
4094        __ MaybeUnpoisonHeapReference(temp1);
4095        __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
4096        __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
4097      // No need to poison/unpoison, we're comparing two poisoned references.
4098        __ cmp(temp1, ShifterOperand(temp2));
4099        if (instruction->StaticTypeOfArrayIsObjectArray()) {
4100          Label do_put;
4101          __ b(&do_put, EQ);
4102          __ MaybeUnpoisonHeapReference(temp1);
4103          __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
4104          // No need to poison/unpoison, we're comparing against null.
4105          __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
4106          __ Bind(&do_put);
4107        } else {
4108          __ b(slow_path->GetEntryLabel(), NE);
4109        }
4110      }
4111
4112      if (kPoisonHeapReferences) {
4113        // Note that in the case where `value` is a null reference,
4114        // we do not enter this block, as a null reference does not
4115        // need poisoning.
4116        DCHECK_EQ(value_type, Primitive::kPrimNot);
4117        __ Mov(temp1, value);
4118        __ PoisonHeapReference(temp1);
4119        source = temp1;
4120      }
4121
4122      if (index.IsConstant()) {
4123        size_t offset =
4124            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
4125        __ StoreToOffset(kStoreWord, source, array, offset);
4126      } else {
4127        DCHECK(index.IsRegister()) << index;
4128        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
4129        __ StoreToOffset(kStoreWord, source, IP, data_offset);
4130      }
4131
4132      if (!may_need_runtime_call) {
4133        codegen_->MaybeRecordImplicitNullCheck(instruction);
4134      }
4135
4136      codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
4137
4138      if (done.IsLinked()) {
4139        __ Bind(&done);
4140      }
4141
4142      if (slow_path != nullptr) {
4143        __ Bind(slow_path->GetExitLabel());
4144      }
4145
4146      break;
4147    }
4148
4149    case Primitive::kPrimInt: {
4150      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
4151      Register value = locations->InAt(2).AsRegister<Register>();
4152      if (index.IsConstant()) {
4153        size_t offset =
4154            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
4155        __ StoreToOffset(kStoreWord, value, array, offset);
4156      } else {
4157        DCHECK(index.IsRegister()) << index;
4158        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
4159        __ StoreToOffset(kStoreWord, value, IP, data_offset);
4160      }
4161
4162      codegen_->MaybeRecordImplicitNullCheck(instruction);
4163      break;
4164    }
4165
4166    case Primitive::kPrimLong: {
4167      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
4168      Location value = locations->InAt(2);
4169      if (index.IsConstant()) {
4170        size_t offset =
4171            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
4172        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), array, offset);
4173      } else {
4174        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
4175        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
4176      }
4177      break;
4178    }
4179
4180    case Primitive::kPrimFloat: {
4181      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
4182      Location value = locations->InAt(2);
4183      DCHECK(value.IsFpuRegister());
4184      if (index.IsConstant()) {
4185        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
4186        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), array, offset);
4187      } else {
4188        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
4189        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
4190      }
4191      break;
4192    }
4193
4194    case Primitive::kPrimDouble: {
4195      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
4196      Location value = locations->InAt(2);
4197      DCHECK(value.IsFpuRegisterPair());
4198      if (index.IsConstant()) {
4199        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
4200        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), array, offset);
4201      } else {
4202        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
4203        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
4204      }
4205
4206      break;
4207    }
4208
4209    case Primitive::kPrimVoid:
4210      LOG(FATAL) << "Unreachable type " << value_type;
4211      UNREACHABLE();
4212  }
4213
4214  // For ints and objects, the implicit null check was already recorded in the switch above.
4215  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
4216    codegen_->MaybeRecordImplicitNullCheck(instruction);
4217  }
4218}
4219
4220void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
4221  LocationSummary* locations =
4222      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4223  locations->SetInAt(0, Location::RequiresRegister());
4224  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4225}
4226
4227void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
4228  LocationSummary* locations = instruction->GetLocations();
4229  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
4230  Register obj = locations->InAt(0).AsRegister<Register>();
4231  Register out = locations->Out().AsRegister<Register>();
4232  __ LoadFromOffset(kLoadWord, out, obj, offset);
4233  codegen_->MaybeRecordImplicitNullCheck(instruction);
4234}
4235
4236void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
4237  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
4238      ? LocationSummary::kCallOnSlowPath
4239      : LocationSummary::kNoCall;
4240  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4241  locations->SetInAt(0, Location::RequiresRegister());
4242  locations->SetInAt(1, Location::RequiresRegister());
4243  if (instruction->HasUses()) {
4244    locations->SetOut(Location::SameAsFirstInput());
4245  }
4246}
4247
4248void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
4249  LocationSummary* locations = instruction->GetLocations();
4250  SlowPathCode* slow_path =
4251      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
4252  codegen_->AddSlowPath(slow_path);
4253
4254  Register index = locations->InAt(0).AsRegister<Register>();
4255  Register length = locations->InAt(1).AsRegister<Register>();
4256
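      // A single unsigned comparison covers both failure modes: a negative index
      // wraps to a large unsigned value, so branching on HS (unsigned >=) catches
      // index < 0 as well as index >= length.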
4257  __ cmp(index, ShifterOperand(length));
4258  __ b(slow_path->GetEntryLabel(), HS);
4259}
4260
4261void CodeGeneratorARM::MarkGCCard(Register temp,
4262                                  Register card,
4263                                  Register object,
4264                                  Register value,
4265                                  bool can_be_null) {
4266  Label is_null;
4267  if (can_be_null) {
4268    __ CompareAndBranchIfZero(value, &is_null);
4269  }
4270  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
4271  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
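      // The card table base register doubles as the value stored: the table is
      // biased so that the low byte of its base address equals the dirty-card
      // value, which avoids materializing a separate constant here.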
4272  __ strb(card, Address(card, temp));
4273  if (can_be_null) {
4274    __ Bind(&is_null);
4275  }
4276}
4277
4278void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
4279  temp->SetLocations(nullptr);
4280}
4281
4282void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
4283  // Nothing to do, this is driven by the code generator.
4284}
4285
4286void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
4287  LOG(FATAL) << "Unreachable";
4288}
4289
4290void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
4291  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
4292}
4293
4294void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
4295  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
4296}
4297
4298void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
4299  HBasicBlock* block = instruction->GetBlock();
4300  if (block->GetLoopInformation() != nullptr) {
4301    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
4302    // The back edge will generate the suspend check.
4303    return;
4304  }
4305  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
4306    // The goto will generate the suspend check.
4307    return;
4308  }
4309  GenerateSuspendCheck(instruction, nullptr);
4310}
4311
4312void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
4313                                                       HBasicBlock* successor) {
4314  SuspendCheckSlowPathARM* slow_path =
4315      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
4316  if (slow_path == nullptr) {
4317    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
4318    instruction->SetSlowPath(slow_path);
4319    codegen_->AddSlowPath(slow_path);
4320    if (successor != nullptr) {
4321      DCHECK(successor->IsLoopHeader());
4322      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
4323    }
4324  } else {
4325    DCHECK_EQ(slow_path->GetSuccessor(), successor);
4326  }
4327
4328  __ LoadFromOffset(
4329      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
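      // A non-zero flags halfword means a suspend or checkpoint request is
      // pending, so the slow path must be taken.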
4330  if (successor == nullptr) {
4331    __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
4332    __ Bind(slow_path->GetReturnLabel());
4333  } else {
4334    __ CompareAndBranchIfZero(IP, codegen_->GetLabelOf(successor));
4335    __ b(slow_path->GetEntryLabel());
4336  }
4337}
4338
4339ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
4340  return codegen_->GetAssembler();
4341}
4342
4343void ParallelMoveResolverARM::EmitMove(size_t index) {
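      // Dispatch on the (source, destination) location kinds. IP (core) and DTMP
      // (FP) are the scratch registers for moves that must go through memory.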
4344  MoveOperands* move = moves_[index];
4345  Location source = move->GetSource();
4346  Location destination = move->GetDestination();
4347
4348  if (source.IsRegister()) {
4349    if (destination.IsRegister()) {
4350      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
4351    } else {
4352      DCHECK(destination.IsStackSlot());
4353      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
4354                       SP, destination.GetStackIndex());
4355    }
4356  } else if (source.IsStackSlot()) {
4357    if (destination.IsRegister()) {
4358      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
4359                        SP, source.GetStackIndex());
4360    } else if (destination.IsFpuRegister()) {
4361      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
4362    } else {
4363      DCHECK(destination.IsStackSlot());
4364      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
4365      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
4366    }
4367  } else if (source.IsFpuRegister()) {
4368    if (destination.IsFpuRegister()) {
4369      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
4370    } else {
4371      DCHECK(destination.IsStackSlot());
4372      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
4373    }
4374  } else if (source.IsDoubleStackSlot()) {
4375    if (destination.IsDoubleStackSlot()) {
4376      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
4377      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
4378    } else if (destination.IsRegisterPair()) {
4379      DCHECK(ExpectedPairLayout(destination));
4380      __ LoadFromOffset(
4381          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
4382    } else {
4383      DCHECK(destination.IsFpuRegisterPair()) << destination;
4384      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
4385                         SP,
4386                         source.GetStackIndex());
4387    }
4388  } else if (source.IsRegisterPair()) {
4389    if (destination.IsRegisterPair()) {
4390      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
4391      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
4392    } else {
4393      DCHECK(destination.IsDoubleStackSlot()) << destination;
4394      DCHECK(ExpectedPairLayout(source));
4395      __ StoreToOffset(
4396          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
4397    }
4398  } else if (source.IsFpuRegisterPair()) {
4399    if (destination.IsFpuRegisterPair()) {
4400      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
4401               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
4402    } else {
4403      DCHECK(destination.IsDoubleStackSlot()) << destination;
4404      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
4405                        SP,
4406                        destination.GetStackIndex());
4407    }
4408  } else {
4409    DCHECK(source.IsConstant()) << source;
4410    HConstant* constant = source.GetConstant();
4411    if (constant->IsIntConstant() || constant->IsNullConstant()) {
4412      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
4413      if (destination.IsRegister()) {
4414        __ LoadImmediate(destination.AsRegister<Register>(), value);
4415      } else {
4416        DCHECK(destination.IsStackSlot());
4417        __ LoadImmediate(IP, value);
4418        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
4419      }
4420    } else if (constant->IsLongConstant()) {
4421      int64_t value = constant->AsLongConstant()->GetValue();
4422      if (destination.IsRegisterPair()) {
4423        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
4424        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
4425      } else {
4426        DCHECK(destination.IsDoubleStackSlot()) << destination;
4427        __ LoadImmediate(IP, Low32Bits(value));
4428        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
4429        __ LoadImmediate(IP, High32Bits(value));
4430        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
4431      }
4432    } else if (constant->IsDoubleConstant()) {
4433      double value = constant->AsDoubleConstant()->GetValue();
4434      if (destination.IsFpuRegisterPair()) {
4435        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
4436      } else {
4437        DCHECK(destination.IsDoubleStackSlot()) << destination;
4438        uint64_t int_value = bit_cast<uint64_t, double>(value);
4439        __ LoadImmediate(IP, Low32Bits(int_value));
4440        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
4441        __ LoadImmediate(IP, High32Bits(int_value));
4442        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
4443      }
4444    } else {
4445      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
4446      float value = constant->AsFloatConstant()->GetValue();
4447      if (destination.IsFpuRegister()) {
4448        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
4449      } else {
4450        DCHECK(destination.IsStackSlot());
4451        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
4452        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
4453      }
4454    }
4455  }
4456}
4457
4458void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
4459  __ Mov(IP, reg);
4460  __ LoadFromOffset(kLoadWord, reg, SP, mem);
4461  __ StoreToOffset(kStoreWord, IP, SP, mem);
4462}
4463
4464void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
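      // Swap two stack slots using IP plus one allocated scratch core register.
      // If the scratch register had to be spilled, the push moved SP down one
      // word, so both slot offsets are rebased by kArmWordSize.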
4465  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
4466  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
4467  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
4468                    SP, mem1 + stack_offset);
4469  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
4470  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
4471                   SP, mem2 + stack_offset);
4472  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
4473}
4474
4475void ParallelMoveResolverARM::EmitSwap(size_t index) {
4476  MoveOperands* move = moves_[index];
4477  Location source = move->GetSource();
4478  Location destination = move->GetDestination();
4479
4480  if (source.IsRegister() && destination.IsRegister()) {
4481    DCHECK_NE(source.AsRegister<Register>(), IP);
4482    DCHECK_NE(destination.AsRegister<Register>(), IP);
4483    __ Mov(IP, source.AsRegister<Register>());
4484    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
4485    __ Mov(destination.AsRegister<Register>(), IP);
4486  } else if (source.IsRegister() && destination.IsStackSlot()) {
4487    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
4488  } else if (source.IsStackSlot() && destination.IsRegister()) {
4489    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
4490  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
4491    Exchange(source.GetStackIndex(), destination.GetStackIndex());
4492  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
4493    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
4494    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
4495    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
4496  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
4497    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
4498    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
4499    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
4500    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
4501               destination.AsRegisterPairHigh<Register>(),
4502               DTMP);
4503  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
4504    Register low_reg = source.IsRegisterPair()
4505        ? source.AsRegisterPairLow<Register>()
4506        : destination.AsRegisterPairLow<Register>();
4507    int mem = source.IsRegisterPair()
4508        ? destination.GetStackIndex()
4509        : source.GetStackIndex();
4510    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
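        // Park the core register pair in DTMP, reload the pair from the stack
        // slot, then flush DTMP to that slot: DTMP is the swap temporary.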
4511    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
4512    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
4513    __ StoreDToOffset(DTMP, SP, mem);
4514  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
4515    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
4516    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
4517    __ vmovd(DTMP, first);
4518    __ vmovd(first, second);
4519    __ vmovd(second, DTMP);
4520  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
4521    DRegister reg = source.IsFpuRegisterPair()
4522        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
4523        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
4524    int mem = source.IsFpuRegisterPair()
4525        ? destination.GetStackIndex()
4526        : source.GetStackIndex();
4527    __ vmovd(DTMP, reg);
4528    __ LoadDFromOffset(reg, SP, mem);
4529    __ StoreDToOffset(DTMP, SP, mem);
4530  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
4531    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
4532                                           : destination.AsFpuRegister<SRegister>();
4533    int mem = source.IsFpuRegister()
4534        ? destination.GetStackIndex()
4535        : source.GetStackIndex();
4536
4537    __ vmovrs(IP, reg);
4538    __ LoadSFromOffset(reg, SP, mem);
4539    __ StoreToOffset(kStoreWord, IP, SP, mem);
4540  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
4541    Exchange(source.GetStackIndex(), destination.GetStackIndex());
4542    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
4543  } else {
4544    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
4545  }
4546}
4547
4548void ParallelMoveResolverARM::SpillScratch(int reg) {
4549  __ Push(static_cast<Register>(reg));
4550}
4551
4552void ParallelMoveResolverARM::RestoreScratch(int reg) {
4553  __ Pop(static_cast<Register>(reg));
4554}
4555
4556void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
4557  InvokeRuntimeCallingConvention calling_convention;
4558  CodeGenerator::CreateLoadClassLocationSummary(
4559      cls,
4560      Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
4561      Location::RegisterLocation(R0));
4562}
4563
4564void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
4565  LocationSummary* locations = cls->GetLocations();
4566  if (cls->NeedsAccessCheck()) {
4567    codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
4568    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
4569                            cls,
4570                            cls->GetDexPc(),
4571                            nullptr);
4572    return;
4573  }
4574
4575  Register out = locations->Out().AsRegister<Register>();
4576  Register current_method = locations->InAt(0).AsRegister<Register>();
4577  if (cls->IsReferrersClass()) {
4578    DCHECK(!cls->CanCallRuntime());
4579    DCHECK(!cls->MustGenerateClinitCheck());
4580    __ LoadFromOffset(
4581        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
4582  } else {
4583    DCHECK(cls->CanCallRuntime());
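        // out <- current_method->dex_cache_resolved_types_[type_index]. A null
        // entry means the type is not resolved yet and the slow path must do it.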
4584    __ LoadFromOffset(kLoadWord,
4585                      out,
4586                      current_method,
4587                      ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
4588    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
4589    // TODO: We will need a read barrier here.
4590
4591    SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
4592        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
4593    codegen_->AddSlowPath(slow_path);
4594    __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
4595    if (cls->MustGenerateClinitCheck()) {
4596      GenerateClassInitializationCheck(slow_path, out);
4597    } else {
4598      __ Bind(slow_path->GetExitLabel());
4599    }
4600  }
4601}
4602
4603void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4604  LocationSummary* locations =
4605      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4606  locations->SetInAt(0, Location::RequiresRegister());
4607  if (check->HasUses()) {
4608    locations->SetOut(Location::SameAsFirstInput());
4609  }
4610}
4611
4612void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
4613  // We assume the class is not null.
4614  SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
4615      check->GetLoadClass(), check, check->GetDexPc(), true);
4616  codegen_->AddSlowPath(slow_path);
4617  GenerateClassInitializationCheck(slow_path,
4618                                   check->GetLocations()->InAt(0).AsRegister<Register>());
4619}
4620
4621void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
4622    SlowPathCode* slow_path, Register class_reg) {
4623  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
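      // Status values below kStatusInitialized (including the negative error
      // states) take the slow path; hence the signed LT comparison below.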
4624  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
4625  __ b(slow_path->GetEntryLabel(), LT);
4626  // Even if the initialized flag is set, we may be in a situation where caches are not synced
4627  // properly. Therefore, we do a memory fence.
4628  __ dmb(ISH);
4629  __ Bind(slow_path->GetExitLabel());
4630}
4631
4632void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4633  LocationSummary* locations =
4634      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4635  locations->SetInAt(0, Location::RequiresRegister());
4636  locations->SetOut(Location::RequiresRegister());
4637}
4638
4639void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
4640  SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
4641  codegen_->AddSlowPath(slow_path);
4642
4643  LocationSummary* locations = load->GetLocations();
4644  Register out = locations->Out().AsRegister<Register>();
4645  Register current_method = locations->InAt(0).AsRegister<Register>();
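      // out <- current_method->declaring_class_->dex_cache_strings_[string_index].
      // A null entry means the string is not resolved yet; the slow path resolves it.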
4646  __ LoadFromOffset(
4647      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
4648  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
4649  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
4650  // TODO: We will need a read barrier here.
4651  __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
4652  __ Bind(slow_path->GetExitLabel());
4653}
4654
4655static int32_t GetExceptionTlsOffset() {
4656  return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
4657}
4658
4659void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4660  LocationSummary* locations =
4661      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4662  locations->SetOut(Location::RequiresRegister());
4663}
4664
4665void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
4666  Register out = load->GetLocations()->Out().AsRegister<Register>();
4667  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
4668}
4669
4670void LocationsBuilderARM::VisitClearException(HClearException* clear) {
4671  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
4672}
4673
4674void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
4675  __ LoadImmediate(IP, 0);
4676  __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
4677}
4678
4679void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4680  LocationSummary* locations =
4681      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4682  InvokeRuntimeCallingConvention calling_convention;
4683  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4684}
4685
4686void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
4687  codegen_->InvokeRuntime(
4688      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
4689}
4690
4691void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4692  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
4693  switch (instruction->GetTypeCheckKind()) {
4694    case TypeCheckKind::kExactCheck:
4695    case TypeCheckKind::kAbstractClassCheck:
4696    case TypeCheckKind::kClassHierarchyCheck:
4697    case TypeCheckKind::kArrayObjectCheck:
4698      call_kind = LocationSummary::kNoCall;
4699      break;
4700    case TypeCheckKind::kUnresolvedCheck:
4701    case TypeCheckKind::kInterfaceCheck:
4702      call_kind = LocationSummary::kCall;
4703      break;
4704    case TypeCheckKind::kArrayCheck:
4705      call_kind = LocationSummary::kCallOnSlowPath;
4706      break;
4707  }
4708  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4709  if (call_kind != LocationSummary::kCall) {
4710    locations->SetInAt(0, Location::RequiresRegister());
4711    locations->SetInAt(1, Location::RequiresRegister());
4712    // The out register is used as a temporary, so it overlaps with the inputs.
4713    // Note that TypeCheckSlowPathARM uses this register too.
4714    locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4715  } else {
4716    InvokeRuntimeCallingConvention calling_convention;
4717    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4718    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
4719    locations->SetOut(Location::RegisterLocation(R0));
4720  }
4721}
4722
4723void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
4724  LocationSummary* locations = instruction->GetLocations();
4725  Register obj = locations->InAt(0).AsRegister<Register>();
4726  Register cls = locations->InAt(1).AsRegister<Register>();
4727  Register out = locations->Out().AsRegister<Register>();
4728  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
4729  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
4730  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
4731  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
4732  Label done, zero;
4733  SlowPathCode* slow_path = nullptr;
4734
4735  // Return 0 if `obj` is null.
4736  // Avoid null check if we know obj is not null.
4737  if (instruction->MustDoNullCheck()) {
4738    __ CompareAndBranchIfZero(obj, &zero);
4739  }
4740
4741  // In case of an interface/unresolved check, we put the object class into the object register.
4742  // This is safe, as the register is caller-save, and the object must be in another
4743  // register if it survives the runtime call.
4744  Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
4745      (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
4746      ? obj
4747      : out;
4748  __ LoadFromOffset(kLoadWord, target, obj, class_offset);
4749  __ MaybeUnpoisonHeapReference(target);
4750
4751  switch (instruction->GetTypeCheckKind()) {
4752    case TypeCheckKind::kExactCheck: {
4753      __ cmp(out, ShifterOperand(cls));
4754      // Classes must be equal for the instanceof to succeed.
4755      __ b(&zero, NE);
4756      __ LoadImmediate(out, 1);
4757      __ b(&done);
4758      break;
4759    }
4760    case TypeCheckKind::kAbstractClassCheck: {
4761      // If the class is abstract, we eagerly fetch the super class of the
4762      // object to avoid doing a comparison we know will fail.
4763      Label loop;
4764      __ Bind(&loop);
4765      __ LoadFromOffset(kLoadWord, out, out, super_offset);
4766      __ MaybeUnpoisonHeapReference(out);
4767      // If `out` is null, we use it for the result, and jump to `done`.
4768      __ CompareAndBranchIfZero(out, &done);
4769      __ cmp(out, ShifterOperand(cls));
4770      __ b(&loop, NE);
4771      __ LoadImmediate(out, 1);
4772      if (zero.IsLinked()) {
4773        __ b(&done);
4774      }
4775      break;
4776    }
4777    case TypeCheckKind::kClassHierarchyCheck: {
4778      // Walk over the class hierarchy to find a match.
4779      Label loop, success;
4780      __ Bind(&loop);
4781      __ cmp(out, ShifterOperand(cls));
4782      __ b(&success, EQ);
4783      __ LoadFromOffset(kLoadWord, out, out, super_offset);
4784      __ MaybeUnpoisonHeapReference(out);
4785      __ CompareAndBranchIfNonZero(out, &loop);
4786      // If `out` is null, we use it for the result, and jump to `done`.
4787      __ b(&done);
4788      __ Bind(&success);
4789      __ LoadImmediate(out, 1);
4790      if (zero.IsLinked()) {
4791        __ b(&done);
4792      }
4793      break;
4794    }
4795    case TypeCheckKind::kArrayObjectCheck: {
4796      // Do an exact check.
4797      Label exact_check;
4798      __ cmp(out, ShifterOperand(cls));
4799      __ b(&exact_check, EQ);
4800      // Otherwise, we need to check that the object's class is a non-primitive array.
4801      __ LoadFromOffset(kLoadWord, out, out, component_offset);
4802      __ MaybeUnpoisonHeapReference(out);
4803      // If `out` is null, we use it for the result, and jump to `done`.
4804      __ CompareAndBranchIfZero(out, &done);
4805      __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
4806      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
4807      __ CompareAndBranchIfNonZero(out, &zero);
4808      __ Bind(&exact_check);
4809      __ LoadImmediate(out, 1);
4810      __ b(&done);
4811      break;
4812    }
4813    case TypeCheckKind::kArrayCheck: {
4814      __ cmp(out, ShifterOperand(cls));
4815      DCHECK(locations->OnlyCallsOnSlowPath());
4816      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
4817          instruction, /* is_fatal */ false);
4818      codegen_->AddSlowPath(slow_path);
4819      __ b(slow_path->GetEntryLabel(), NE);
4820      __ LoadImmediate(out, 1);
4821      if (zero.IsLinked()) {
4822        __ b(&done);
4823      }
4824      break;
4825    }
4826    case TypeCheckKind::kUnresolvedCheck:
4827    case TypeCheckKind::kInterfaceCheck:
4828    default: {
4829      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
4830                              instruction,
4831                              instruction->GetDexPc(),
4832                              nullptr);
4833      if (zero.IsLinked()) {
4834        __ b(&done);
4835      }
4836      break;
4837    }
4838  }
4839
4840  if (zero.IsLinked()) {
4841    __ Bind(&zero);
4842    __ LoadImmediate(out, 0);
4843  }
4844
4845  if (done.IsLinked()) {
4846    __ Bind(&done);
4847  }
4848
4849  if (slow_path != nullptr) {
4850    __ Bind(slow_path->GetExitLabel());
4851  }
4852}
4853
4854void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4855  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
4856  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
4857
4858  switch (instruction->GetTypeCheckKind()) {
4859    case TypeCheckKind::kExactCheck:
4860    case TypeCheckKind::kAbstractClassCheck:
4861    case TypeCheckKind::kClassHierarchyCheck:
4862    case TypeCheckKind::kArrayObjectCheck:
4863      call_kind = throws_into_catch
4864          ? LocationSummary::kCallOnSlowPath
4865          : LocationSummary::kNoCall;
4866      break;
4867    case TypeCheckKind::kUnresolvedCheck:
4868    case TypeCheckKind::kInterfaceCheck:
4869      call_kind = LocationSummary::kCall;
4870      break;
4871    case TypeCheckKind::kArrayCheck:
4872      call_kind = LocationSummary::kCallOnSlowPath;
4873      break;
4874  }
4875
4876  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4877      instruction, call_kind);
4878  if (call_kind != LocationSummary::kCall) {
4879    locations->SetInAt(0, Location::RequiresRegister());
4880    locations->SetInAt(1, Location::RequiresRegister());
4881    // Note that TypeCheckSlowPathARM uses this register too.
4882    locations->AddTemp(Location::RequiresRegister());
4883  } else {
4884    InvokeRuntimeCallingConvention calling_convention;
4885    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4886    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
4887  }
4888}
4889
4890void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
4891  LocationSummary* locations = instruction->GetLocations();
4892  Register obj = locations->InAt(0).AsRegister<Register>();
4893  Register cls = locations->InAt(1).AsRegister<Register>();
4894  Register temp = locations->WillCall()
4895      ? Register(kNoRegister)
4896      : locations->GetTemp(0).AsRegister<Register>();
4897
4898  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
4899  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
4900  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
4901  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
4902  SlowPathCode* slow_path = nullptr;
4903
4904  if (!locations->WillCall()) {
4905    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
4906        instruction, !locations->CanCall());
4907    codegen_->AddSlowPath(slow_path);
4908  }
4909
4910  Label done;
4911  // Avoid null check if we know obj is not null.
4912  if (instruction->MustDoNullCheck()) {
4913    __ CompareAndBranchIfZero(obj, &done);
4914  }
4915
4916  if (locations->WillCall()) {
4917    __ LoadFromOffset(kLoadWord, obj, obj, class_offset);
4918    __ MaybeUnpoisonHeapReference(obj);
4919  } else {
4920    __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
4921    __ MaybeUnpoisonHeapReference(temp);
4922  }
4923
4924  switch (instruction->GetTypeCheckKind()) {
4925    case TypeCheckKind::kExactCheck:
4926    case TypeCheckKind::kArrayCheck: {
4927      __ cmp(temp, ShifterOperand(cls));
4928      // Jump to slow path for throwing the exception or doing a
4929      // more involved array check.
4930      __ b(slow_path->GetEntryLabel(), NE);
4931      break;
4932    }
4933    case TypeCheckKind::kAbstractClassCheck: {
4934      // If the class is abstract, we eagerly fetch the super class of the
4935      // object to avoid doing a comparison we know will fail.
4936      Label loop;
4937      __ Bind(&loop);
4938      __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
4939      __ MaybeUnpoisonHeapReference(temp);
4940      // Jump to the slow path to throw the exception.
4941      __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
4942      __ cmp(temp, ShifterOperand(cls));
4943      __ b(&loop, NE);
4944      break;
4945    }
4946    case TypeCheckKind::kClassHierarchyCheck: {
4947      // Walk over the class hierarchy to find a match.
4948      Label loop;
4949      __ Bind(&loop);
4950      __ cmp(temp, ShifterOperand(cls));
4951      __ b(&done, EQ);
4952      __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
4953      __ MaybeUnpoisonHeapReference(temp);
4954      __ CompareAndBranchIfNonZero(temp, &loop);
4955      // Jump to the slow path to throw the exception.
4956      __ b(slow_path->GetEntryLabel());
4957      break;
4958    }
4959    case TypeCheckKind::kArrayObjectCheck: {
4960      // Do an exact check.
4961      __ cmp(temp, ShifterOperand(cls));
4962      __ b(&done, EQ);
4963      // Otherwise, we need to check that the object's class is a non-primitive array.
4964      __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
4965      __ MaybeUnpoisonHeapReference(temp);
4966      __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
4967      __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
4968      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
4969      __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
4970      break;
4971    }
4972    case TypeCheckKind::kUnresolvedCheck:
4973    case TypeCheckKind::kInterfaceCheck:
4974    default:
4975      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
4976                              instruction,
4977                              instruction->GetDexPc(),
4978                              nullptr);
4979      break;
4980  }
4981  __ Bind(&done);
4982
4983  if (slow_path != nullptr) {
4984    __ Bind(slow_path->GetExitLabel());
4985  }
4986}
4987
4988void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4989  LocationSummary* locations =
4990      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4991  InvokeRuntimeCallingConvention calling_convention;
4992  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4993}
4994
4995void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4996  codegen_->InvokeRuntime(instruction->IsEnter()
4997        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4998      instruction,
4999      instruction->GetDexPc(),
5000      nullptr);
5001}
5002
5003void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction, AND); }
5004void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction, ORR); }
5005void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction, EOR); }
5006
5007void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
5008  LocationSummary* locations =
5009      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
5010  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
5011         || instruction->GetResultType() == Primitive::kPrimLong);
5012  // Note: GVN reorders commutative operations to have the constant on the right hand side.
5013  locations->SetInAt(0, Location::RequiresRegister());
5014  locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode));
5015  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
5016}
5017
5018void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
5019  HandleBitwiseOperation(instruction);
5020}
5021
5022void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
5023  HandleBitwiseOperation(instruction);
5024}
5025
5026void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
5027  HandleBitwiseOperation(instruction);
5028}
5029
5030void InstructionCodeGeneratorARM::GenerateAndConst(Register out, Register first, uint32_t value) {
5031  // Optimize special cases for individual halves of `and-long` (`and` is simplified earlier).
5032  if (value == 0xffffffffu) {
5033    if (out != first) {
5034      __ mov(out, ShifterOperand(first));
5035    }
5036    return;
5037  }
5038  if (value == 0u) {
5039    __ mov(out, ShifterOperand(0));
5040    return;
5041  }
5042  ShifterOperand so;
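      // Prefer AND with an encodable immediate; otherwise fall back to BIC with
      // the bitwise complement. The locations builder is expected to admit only
      // constants for which one of the two encodings fits, hence the DCHECK.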
5043  if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, AND, value, &so)) {
5044    __ and_(out, first, so);
5045  } else {
5046    DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, BIC, ~value, &so));
5047    __ bic(out, first, ShifterOperand(~value));
5048  }
5049}
5050
5051void InstructionCodeGeneratorARM::GenerateOrrConst(Register out, Register first, uint32_t value) {
5052  // Optimize special cases for individual halves of `or-long` (`or` is simplified earlier).
5053  if (value == 0u) {
5054    if (out != first) {
5055      __ mov(out, ShifterOperand(first));
5056    }
5057    return;
5058  }
5059  if (value == 0xffffffffu) {
5060    __ mvn(out, ShifterOperand(0));
5061    return;
5062  }
5063  ShifterOperand so;
5064  if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORR, value, &so)) {
5065    __ orr(out, first, so);
5066  } else {
5067    DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORN, ~value, &so));
5068    __ orn(out, first, ShifterOperand(~value));
5069  }
5070}
5071
5072void InstructionCodeGeneratorARM::GenerateEorConst(Register out, Register first, uint32_t value) {
5073  // Optimize special case for individual halves of `xor-long` (`xor` is simplified earlier).
5074  if (value == 0u) {
5075    if (out != first) {
5076      __ mov(out, ShifterOperand(first));
5077    }
5078    return;
5079  }
5080  __ eor(out, first, ShifterOperand(value));
5081}
5082
5083void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
5084  LocationSummary* locations = instruction->GetLocations();
5085  Location first = locations->InAt(0);
5086  Location second = locations->InAt(1);
5087  Location out = locations->Out();
5088
5089  if (second.IsConstant()) {
5090    uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
5091    uint32_t value_low = Low32Bits(value);
5092    if (instruction->GetResultType() == Primitive::kPrimInt) {
5093      Register first_reg = first.AsRegister<Register>();
5094      Register out_reg = out.AsRegister<Register>();
5095      if (instruction->IsAnd()) {
5096        GenerateAndConst(out_reg, first_reg, value_low);
5097      } else if (instruction->IsOr()) {
5098        GenerateOrrConst(out_reg, first_reg, value_low);
5099      } else {
5100        DCHECK(instruction->IsXor());
5101        GenerateEorConst(out_reg, first_reg, value_low);
5102      }
5103    } else {
5104      DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
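          // Bitwise operations have no carries, so the 64-bit operation splits
          // into two independent 32-bit operations on the low and high halves.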
5105      uint32_t value_high = High32Bits(value);
5106      Register first_low = first.AsRegisterPairLow<Register>();
5107      Register first_high = first.AsRegisterPairHigh<Register>();
5108      Register out_low = out.AsRegisterPairLow<Register>();
5109      Register out_high = out.AsRegisterPairHigh<Register>();
5110      if (instruction->IsAnd()) {
5111        GenerateAndConst(out_low, first_low, value_low);
5112        GenerateAndConst(out_high, first_high, value_high);
5113      } else if (instruction->IsOr()) {
5114        GenerateOrrConst(out_low, first_low, value_low);
5115        GenerateOrrConst(out_high, first_high, value_high);
5116      } else {
5117        DCHECK(instruction->IsXor());
5118        GenerateEorConst(out_low, first_low, value_low);
5119        GenerateEorConst(out_high, first_high, value_high);
5120      }
5121    }
5122    return;
5123  }
5124
5125  if (instruction->GetResultType() == Primitive::kPrimInt) {
5126    Register first_reg = first.AsRegister<Register>();
5127    ShifterOperand second_reg(second.AsRegister<Register>());
5128    Register out_reg = out.AsRegister<Register>();
5129    if (instruction->IsAnd()) {
5130      __ and_(out_reg, first_reg, second_reg);
5131    } else if (instruction->IsOr()) {
5132      __ orr(out_reg, first_reg, second_reg);
5133    } else {
5134      DCHECK(instruction->IsXor());
5135      __ eor(out_reg, first_reg, second_reg);
5136    }
5137  } else {
5138    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
5139    Register first_low = first.AsRegisterPairLow<Register>();
5140    Register first_high = first.AsRegisterPairHigh<Register>();
5141    ShifterOperand second_low(second.AsRegisterPairLow<Register>());
5142    ShifterOperand second_high(second.AsRegisterPairHigh<Register>());
5143    Register out_low = out.AsRegisterPairLow<Register>();
5144    Register out_high = out.AsRegisterPairHigh<Register>();
5145    if (instruction->IsAnd()) {
5146      __ and_(out_low, first_low, second_low);
5147      __ and_(out_high, first_high, second_high);
5148    } else if (instruction->IsOr()) {
5149      __ orr(out_low, first_low, second_low);
5150      __ orr(out_high, first_high, second_high);
5151    } else {
5152      DCHECK(instruction->IsXor());
5153      __ eor(out_low, first_low, second_low);
5154      __ eor(out_high, first_high, second_high);
5155    }
5156  }
5157}
5158
5159void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
5160  // For better instruction scheduling we load the direct code pointer before the method pointer.
5161  bool direct_code_loaded = false;
5162  switch (invoke->GetCodePtrLocation()) {
5163    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
5164      if (IsSameDexFile(*invoke->GetTargetMethod().dex_file, GetGraph()->GetDexFile())) {
5165        break;
5166      }
5167      // Calls across dex files are more likely to exceed the available BL range,
4168      // so use an absolute patch by falling through to kCallDirectWithFixup.
5169      FALLTHROUGH_INTENDED;
5170    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
5171      // LR = code address from literal pool with link-time patch.
5172      __ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
5173      direct_code_loaded = true;
5174      break;
5175    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
5176      // LR = invoke->GetDirectCodePtr();
5177      __ LoadImmediate(LR, invoke->GetDirectCodePtr());
5178      direct_code_loaded = true;
5179      break;
5180    default:
5181      break;
5182  }
5183
5184  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
5185  switch (invoke->GetMethodLoadKind()) {
5186    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
5187      // temp = thread->string_init_entrypoint
5188      __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, invoke->GetStringInitOffset());
5189      break;
5190    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
5191      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
5192      break;
5193    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
5194      __ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
5195      break;
5196    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
5197      __ LoadLiteral(temp.AsRegister<Register>(),
5198                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
5199      break;
5200    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
5201      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
5202      FALLTHROUGH_INTENDED;
5203    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
5204      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
5205      Register method_reg;
5206      Register reg = temp.AsRegister<Register>();
5207      if (current_method.IsRegister()) {
5208        method_reg = current_method.AsRegister<Register>();
5209      } else {
5210        DCHECK(invoke->GetLocations()->Intrinsified());
5211        DCHECK(!current_method.IsValid());
5212        method_reg = reg;
5213        __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
5214      }
5215      // temp = current_method->dex_cache_resolved_methods_;
5216      __ LoadFromOffset(
5217          kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset(
5218              kArmPointerSize).Int32Value());
5219      // temp = temp[index_in_cache]
5220      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
5221      __ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
5222      break;
5223    }
5224  }
5225
5226  switch (invoke->GetCodePtrLocation()) {
5227    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
5228      __ bl(GetFrameEntryLabel());
5229      break;
5230    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
5231      if (!direct_code_loaded) {
5232        relative_call_patches_.emplace_back(invoke->GetTargetMethod());
5233        __ Bind(&relative_call_patches_.back().label);
5234        Label label;
5235        __ bl(&label);  // Arbitrarily branch to the instruction after BL, override at link time.
5236        __ Bind(&label);
5237        break;
5238      }
5239      // If we loaded the direct code above, fall through.
5240      FALLTHROUGH_INTENDED;
5241    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
5242    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
5243      // LR prepared above for better instruction scheduling.
5244      DCHECK(direct_code_loaded);
5245      // LR()
5246      __ blx(LR);
5247      break;
5248    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
5249      // LR = callee_method->entry_point_from_quick_compiled_code_
5250      __ LoadFromOffset(
5251          kLoadWord, LR, callee_method.AsRegister<Register>(),
5252          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
5253      // LR()
5254      __ blx(LR);
5255      break;
5256  }
5257
5258  DCHECK(!IsLeafMethod());
5259}
5260
5261void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
5262  Register temp = temp_location.AsRegister<Register>();
5263  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
5264      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
5265  LocationSummary* locations = invoke->GetLocations();
5266  Location receiver = locations->InAt(0);
5267  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
5268  // temp = object->GetClass();
5269  DCHECK(receiver.IsRegister());
5270  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
5271  MaybeRecordImplicitNullCheck(invoke);
5272  __ MaybeUnpoisonHeapReference(temp);
5273  // temp = temp->GetMethodAt(method_offset);
5274  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
5275      kArmWordSize).Int32Value();
5276  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
5277  // LR = temp->GetEntryPoint();
5278  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
5279  // LR();
5280  __ blx(LR);
5281}
5282
5283void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
5284  DCHECK(linker_patches->empty());
5285  size_t size = method_patches_.size() + call_patches_.size() + relative_call_patches_.size();
5286  linker_patches->reserve(size);
5287  for (const auto& entry : method_patches_) {
5288    const MethodReference& target_method = entry.first;
5289    Literal* literal = entry.second;
5290    DCHECK(literal->GetLabel()->IsBound());
5291    uint32_t literal_offset = literal->GetLabel()->Position();
5292    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
5293                                                       target_method.dex_file,
5294                                                       target_method.dex_method_index));
5295  }
5296  for (const auto& entry : call_patches_) {
5297    const MethodReference& target_method = entry.first;
5298    Literal* literal = entry.second;
5299    DCHECK(literal->GetLabel()->IsBound());
5300    uint32_t literal_offset = literal->GetLabel()->Position();
5301    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
5302                                                     target_method.dex_file,
5303                                                     target_method.dex_method_index));
5304  }
5305  for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
5306    uint32_t literal_offset = info.label.Position();
5307    linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
5308                                                             info.target_method.dex_file,
5309                                                             info.target_method.dex_method_index));
5310  }
5311}
5312
5313Literal* CodeGeneratorARM::DeduplicateMethodLiteral(MethodReference target_method,
5314                                                    MethodToLiteralMap* map) {
5315  // Look up the literal for target_method.
5316  auto lb = map->lower_bound(target_method);
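      // `lb` is the first entry whose key is not less than `target_method`; if
      // `target_method` is not less than that key either, the keys are equal and
      // the existing literal is reused. Otherwise `lb` is the insertion hint.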
5317  if (lb != map->end() && !map->key_comp()(target_method, lb->first)) {
5318    return lb->second;
5319  }
5320  // We don't have a literal for this method yet, insert a new one.
5321  Literal* literal = __ NewLiteral<uint32_t>(0u);
5322  map->PutBefore(lb, target_method, literal);
5323  return literal;
5324}
5325
5326Literal* CodeGeneratorARM::DeduplicateMethodAddressLiteral(MethodReference target_method) {
5327  return DeduplicateMethodLiteral(target_method, &method_patches_);
5328}
5329
5330Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_method) {
5331  return DeduplicateMethodLiteral(target_method, &call_patches_);
5332}
5333
5334void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
5335  // Nothing to do, this should be removed during prepare for register allocator.
5336  LOG(FATAL) << "Unreachable";
5337}
5338
5339void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
5340  // Nothing to do, this should be removed during prepare for register allocator.
5341  LOG(FATAL) << "Unreachable";
5342}
5343
5344void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
5345  DCHECK(codegen_->IsBaseline());
5346  LocationSummary* locations =
5347      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
5348  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
5349}
5350
5351void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
5352  DCHECK(codegen_->IsBaseline());
5353  // Will be generated at use site.
5354}
5355
5356// Simple implementation of packed switch - generate cascaded compare/jumps.
5357void LocationsBuilderARM::VisitPackedSwitch(HPackedSwitch* switch_instr) {
5358  LocationSummary* locations =
5359      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
5360  locations->SetInAt(0, Location::RequiresRegister());
5361}
5362
5363void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr) {
5364  int32_t lower_bound = switch_instr->GetStartValue();
5365  int32_t num_entries = switch_instr->GetNumEntries();
5366  LocationSummary* locations = switch_instr->GetLocations();
5367  Register value_reg = locations->InAt(0).AsRegister<Register>();
5368  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
5369
5370  // Create a series of compare/jumps.
5371  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
5372  for (int32_t i = 0; i < num_entries; i++) {
5373    GenerateCompareWithImmediate(value_reg, lower_bound + i);
5374    __ b(codegen_->GetLabelOf(successors[i]), EQ);
5375  }
5376
5377  // And the default for any other value.
5378  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
5379    __ b(codegen_->GetLabelOf(default_block));
5380  }
5381}
5382
5383void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) {
5384  if (!trg.IsValid()) {
5385    DCHECK(type == Primitive::kPrimVoid);
5386    return;
5387  }
5388
5389  DCHECK_NE(type, Primitive::kPrimVoid);
5390
5391  Location return_loc = InvokeDexCallingConventionVisitorARM().GetReturnLocation(type);
5392  if (return_loc.Equals(trg)) {
5393    return;
5394  }
5395
5396  // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
5397  //       with the last branch.
5398  if (type == Primitive::kPrimLong) {
5399    HParallelMove parallel_move(GetGraph()->GetArena());
5400    parallel_move.AddMove(return_loc.ToLow(), trg.ToLow(), Primitive::kPrimInt, nullptr);
5401    parallel_move.AddMove(return_loc.ToHigh(), trg.ToHigh(), Primitive::kPrimInt, nullptr);
5402    GetMoveResolver()->EmitNativeCode(&parallel_move);
5403  } else if (type == Primitive::kPrimDouble) {
5404    HParallelMove parallel_move(GetGraph()->GetArena());
5405    parallel_move.AddMove(return_loc.ToLow(), trg.ToLow(), Primitive::kPrimFloat, nullptr);
5406    parallel_move.AddMove(return_loc.ToHigh(), trg.ToHigh(), Primitive::kPrimFloat, nullptr);
5407    GetMoveResolver()->EmitNativeCode(&parallel_move);
5408  } else {
5409    // Let the parallel move resolver take care of all of this.
5410    HParallelMove parallel_move(GetGraph()->GetArena());
5411    parallel_move.AddMove(return_loc, trg, type, nullptr);
5412    GetMoveResolver()->EmitNativeCode(&parallel_move);
5413  }
5414}
5415
5416#undef __
5417#undef QUICK_ENTRY_POINT
5418
5419}  // namespace arm
5420}  // namespace art
5421