code_generator_arm.cc revision 33ad10e72438f01d11ec57695fe68194007535d2
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "compiled_method.h"
23#include "entrypoints/quick/quick_entrypoints.h"
24#include "gc/accounting/card_table.h"
25#include "intrinsics.h"
26#include "intrinsics_arm.h"
27#include "mirror/array-inl.h"
28#include "mirror/class-inl.h"
29#include "thread.h"
30#include "utils/arm/assembler_arm.h"
31#include "utils/arm/managed_register_arm.h"
32#include "utils/assembler.h"
33#include "utils/stack_checks.h"
34
35namespace art {
36
37namespace arm {
38
39static bool ExpectedPairLayout(Location location) {
40  // We expected this for both core and fpu register pairs.
41  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
42}
43
// The current ArtMethod* is stored at SP + 0 (see GenerateFrameEntry).
static constexpr int kCurrentMethodStackOffset = 0;
// The calling convention passes the current ArtMethod* in R0.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
// Core callee-saved registers; LR is included so a single push/pop also
// covers the return address (see GenerateFrameEntry/Exit).
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, LR };
// VFP callee-saved registers S16-S31.
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// NOTE(review): presumably the minimum number of entries for lowering a
// packed switch through a jump table — confirm against the packed-switch
// visitor (not visible in this chunk).
static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;

// `codegen` is the CodeGenerator* in scope at each use site (slow paths).
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
// Thread-relative offset of a quick runtime entry point, as an int32.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
63
// Slow path for HNullCheck: calls the runtime to throw a
// NullPointerException. This path never returns to compiled code.
class NullCheckSlowPathARM : public SlowPathCode {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

  // The runtime call throws; no branch back to the fast path is emitted.
  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
87
// Slow path for HDivZeroCheck: calls the runtime to throw an
// ArithmeticException. This path never returns to compiled code.
class DivZeroCheckSlowPathARM : public SlowPathCode {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

  // The runtime call throws; no branch back to the fast path is emitted.
  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
111
// Slow path for HSuspendCheck: calls the pTestSuspend runtime entry point so
// the thread can honor a pending suspension request, then branches back to
// either the explicit successor block or the return label.
class SuspendCheckSlowPathARM : public SlowPathCode {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    // Unlike the throwing slow paths, this one resumes execution, so live
    // registers are saved and restored around the runtime call.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      // No explicit successor: resume right after the suspend check.
      __ b(GetReturnLabel());
    } else {
      // Branch directly to the successor block.
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    // Only meaningful when there is no explicit successor.
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
152
// Slow path for HBoundsCheck: passes the index and length to the runtime,
// which throws ArrayIndexOutOfBoundsException. Never returns to compiled code.
class BoundsCheckSlowPathARM : public SlowPathCode {
 public:
  explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();

    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

  // The runtime call throws; no branch back to the fast path is emitted.
  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
190
// Slow path for HLoadClass / HClinitCheck: calls the runtime to resolve
// (and, for clinit checks, initialize) a class, then moves the result from
// R0 into the instruction's output register, if it has one.
class LoadClassSlowPathARM : public SlowPathCode {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // The type index is the single argument of the runtime call.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    // Pick the entry point: initialize static storage (which also resolves)
    // for clinit checks, plain type resolution otherwise.
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      // The output register must not be among the saved live registers, or
      // RestoreLiveRegisters below would clobber the result.
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
243
// Slow path for HLoadString: calls the runtime to resolve a string by its
// dex-file index and moves the result from R0 into the output register.
class LoadStringSlowPathARM : public SlowPathCode {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // The output register must not be among the saved live registers, or
    // RestoreLiveRegisters below would clobber the result.
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // The string index is the single argument of the runtime call.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    // The resolved string is returned in R0.
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
273
// Slow path for HInstanceOf / HCheckCast: calls into the runtime when the
// fast-path class comparison is inconclusive. For check-casts the path may
// be fatal (the runtime throws instead of returning).
class TypeCheckSlowPathARM : public SlowPathCode {
 public:
  TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
      : instruction_(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // For check-cast the object's class is kept in the first temp; for
    // instance-of it lives in the output register.
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());

    if (instruction_->IsCheckCast()) {
      // The codegen for the instruction overwrites `temp`, so put it back in place.
      Register obj = locations->InAt(0).AsRegister<Register>();
      Register temp = locations->GetTemp(0).AsRegister<Register>();
      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
      __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
      __ MaybeUnpoisonHeapReference(temp);
    }

    if (!is_fatal_) {
      // A fatal path never returns, so saving live registers is unnecessary.
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        object_class,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
      // The instance-of result is returned in R0.
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ b(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }

  bool IsFatal() const OVERRIDE { return is_fatal_; }

 private:
  HInstruction* const instruction_;
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
343
// Slow path for HDeoptimize: hands control to the pDeoptimize runtime entry
// point. No branch back to compiled code is emitted.
class DeoptimizationSlowPathARM : public SlowPathCode {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    // Live registers are saved so the runtime can reconstruct the frame state.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
365
// Slow path for HArraySet of object references: delegates the store to the
// pAputObject runtime entry point, passing array, index and value.
class ArraySetSlowPathARM : public SlowPathCode {
 public:
  explicit ArraySetSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Three arguments may overlap the calling-convention registers, so move
    // them all with one parallel move.
    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    // Argument 0: the array reference.
    parallel_move.AddMove(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        nullptr);
    // Argument 1: the index.
    parallel_move.AddMove(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt,
        nullptr);
    // Argument 2: the value to store.
    parallel_move.AddMove(
        locations->InAt(2),
        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
        Primitive::kPrimNot,
        nullptr);
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                               instruction_,
                               instruction_->GetDexPc(),
                               this);
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
};
410
411#undef __
412#define __ down_cast<ArmAssembler*>(GetAssembler())->
413
414inline Condition ARMCondition(IfCondition cond) {
415  switch (cond) {
416    case kCondEQ: return EQ;
417    case kCondNE: return NE;
418    case kCondLT: return LT;
419    case kCondLE: return LE;
420    case kCondGT: return GT;
421    case kCondGE: return GE;
422    case kCondB:  return LO;
423    case kCondBE: return LS;
424    case kCondA:  return HI;
425    case kCondAE: return HS;
426  }
427  LOG(FATAL) << "Unreachable";
428  UNREACHABLE();
429}
430
431// Maps signed condition to unsigned condition.
432inline Condition ARMUnsignedCondition(IfCondition cond) {
433  switch (cond) {
434    case kCondEQ: return EQ;
435    case kCondNE: return NE;
436    // Signed to unsigned.
437    case kCondLT: return LO;
438    case kCondLE: return LS;
439    case kCondGT: return HI;
440    case kCondGE: return HS;
441    // Unsigned remain unchanged.
442    case kCondB:  return LO;
443    case kCondBE: return LS;
444    case kCondA:  return HI;
445    case kCondAE: return HS;
446  }
447  LOG(FATAL) << "Unreachable";
448  UNREACHABLE();
449}
450
451void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
452  stream << Register(reg);
453}
454
455void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
456  stream << SRegister(reg);
457}
458
// Spills core register `reg_id` to SP + `stack_index`; returns the slot size.
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
463
// Reloads core register `reg_id` from SP + `stack_index`; returns the slot size.
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
468
// Spills S register `reg_id` to SP + `stack_index`; returns the slot size.
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
473
// Reloads S register `reg_id` from SP + `stack_index`; returns the slot size.
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
478
// Constructs the ARM code generator, handing the base class the register
// counts and callee-save masks that drive frame layout and allocation.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options,
                                   OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    // Bit masks of the callee-saved core and FP registers.
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features),
      // Patch containers are arena-allocated so their lifetime matches the
      // compilation of this graph.
      method_patches_(MethodReferenceComparator(),
                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      call_patches_(MethodReferenceComparator(),
                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
  // Always save the LR register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(LR));
}
507
// Finalizes code generation: lets the assembler fix up branches/literals,
// then remaps every recorded pc offset that finalization may have shifted.
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}
535
// Allocates a free register (or register pair) able to hold a value of
// `type`, updating the blocked-register bookkeeping to keep single registers
// and pairs consistent with each other.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      // Longs take a core register pair; block the two underlying single
      // registers so they cannot also be handed out individually.
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      // Floats fit in a single S register.
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index
      // (so they form a D register).
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
586
// Marks the registers the allocator must never hand out, depending on
// whether we are compiling baseline or a debuggable graph.
void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
  // Don't allocate the dalvik style register pair passing.
  blocked_register_pairs_[R1_R2] = true;

  // Stack register, LR and PC are always reserved.
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[LR] = true;
  blocked_core_registers_[PC] = true;

  // Reserve thread register.
  blocked_core_registers_[TR] = true;

  // Reserve temp register.
  blocked_core_registers_[IP] = true;

  if (is_baseline) {
    // Baseline blocks all core callee-saves except the one it always spills
    // (kCoreSavedRegisterForBaseline, see ComputeSpillMask).
    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
    }

    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
  }

  if (is_baseline || GetGraph()->IsDebuggable()) {
    // Stubs do not save callee-save floating point registers. If the graph
    // is debuggable, we need to deal with these registers differently. For
    // now, just block them.
    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
    }
  }

  // Keep the pair bookkeeping in sync with the single-register blocks above.
  UpdateBlockedPairRegisters();
}
621
622void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
623  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
624    ArmManagedRegister current =
625        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
626    if (blocked_core_registers_[current.AsRegisterPairLow()]
627        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
628      blocked_register_pairs_[i] = true;
629    }
630  }
631}
632
// The instruction visitor emits code through the code generator's assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
637
// Computes which callee-saved core/FP registers the prologue must spill.
void CodeGeneratorARM::ComputeSpillMask() {
  // Spill only the callee-saved core registers that were actually allocated.
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill any holes between the lowest and highest spilled FP registers so
    // the mask describes one contiguous vpush/vpop range.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
658
// Maps an ARM core register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
662
// Maps an ARM S register to its DWARF register number for CFI emission.
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
666
// Emits the method prologue: optional implicit stack-overflow probe,
// callee-save spills (with matching CFI), frame allocation, and the store of
// the current ArtMethod* at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  // Leaf methods whose frame fits in the reserved slack skip the probe.
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  // This code generator only supports implicit (probe-based) overflow checks.
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    return;
  }

  if (!skip_overflow_check) {
    // Implicit overflow check: load from SP minus the reserved bytes. On
    // overflow this faults; RecordPcInfo lets the runtime attribute the
    // fault to this method (presumably via the fault handler -- the handler
    // itself is outside this file).
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  // Spill core callee-saves (including LR), then FP callee-saves, emitting
  // CFI so unwinders can locate the saved registers.
  __ PushList(core_spill_mask_);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), matching vpush's
    // base-register + count form.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame and store the ArtMethod* (still in R0)
  // at the top of the stack (kCurrentMethodStackOffset).
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
697
// Emits the method epilogue: deallocates the frame, restores callee-saves,
// and returns by popping the saved LR directly into PC.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    // Nothing was spilled and no frame was allocated; just return.
    __ bx(LR);
    return;
  }
  // Remember the CFI state so it can be restored after the epilogue; code
  // emitted after this point still executes with the full frame.
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    // fpu_spill_mask_ is contiguous (see ComputeSpillMask), matching vpop's
    // base-register + count form.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // Pop LR into PC to return.
  DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
  uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
  __ PopList(pop_mask);
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
720
721void CodeGeneratorARM::Bind(HBasicBlock* block) {
722  Label* label = GetLabelOf(block);
723  __ BindTrackedLabel(label);
724}
725
// Returns the stack slot (single or double, by type width) backing a local
// variable load.
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  switch (load->GetType()) {
    // 64-bit values occupy two consecutive stack slots.
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    // 32-bit values occupy a single stack slot.
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    // Sub-word types and void never appear as local loads here.
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << load->GetType();
      UNREACHABLE();
  }

  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}
749
// Assigns the location (register(s) or stack slot) of the next argument of
// `type` under the dex calling convention, advancing the running register
// and stack indices. Note that `stack_index_` advances even when a register
// is used, keeping the overall stack layout consistent.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // 32-bit values take one core register while any remain, then one
      // stack slot each.
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs take two consecutive core registers (or a double stack slot).
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));

        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      // An even float index means no single S register was left unpaired by
      // a preceding double, so skip past registers consumed by doubles.
      if (float_index_ % 2 == 0) {
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles need an even-aligned pair of S registers; round the float
      // index up to the next even register when computing the double index.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
825
826Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
827  switch (type) {
828    case Primitive::kPrimBoolean:
829    case Primitive::kPrimByte:
830    case Primitive::kPrimChar:
831    case Primitive::kPrimShort:
832    case Primitive::kPrimInt:
833    case Primitive::kPrimNot: {
834      return Location::RegisterLocation(R0);
835    }
836
837    case Primitive::kPrimFloat: {
838      return Location::FpuRegisterLocation(S0);
839    }
840
841    case Primitive::kPrimLong: {
842      return Location::RegisterPairLocation(R0, R1);
843    }
844
845    case Primitive::kPrimDouble: {
846      return Location::FpuRegisterPairLocation(S0, S1);
847    }
848
849    case Primitive::kPrimVoid:
850      return Location();
851  }
852
853  UNREACHABLE();
854}
855
// The current ArtMethod is passed in R0 (kMethodRegisterArgument).
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
859
// Emits a 32-bit move between any combination of core register, single FP
// register and stack slot. No code is emitted when source equals destination.
void CodeGeneratorARM::Move32(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegister()) {
    // Core register destination.
    if (source.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegister()) {
    // Single FP register destination.
    if (source.IsRegister()) {
      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
    } else if (source.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    }
  } else {
    // Stack slot destination.
    DCHECK(destination.IsStackSlot()) << destination;
    if (source.IsRegister()) {
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
    } else if (source.IsFpuRegister()) {
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    } else {
      // Stack-to-stack move: stage the word through the scratch register IP.
      DCHECK(source.IsStackSlot()) << source;
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  }
}
893
// Emits a 64-bit move between register pairs, FP register pairs and double
// stack slots. No code is emitted when source equals destination.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Pair-to-pair move: use the parallel move resolver so overlapping
      // source/destination registers are handled safely.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else if (source.IsFpuRegisterPair()) {
      __ vmovrrd(destination.AsRegisterPairLow<Register>(),
                 destination.AsRegisterPairHigh<Register>(),
                 FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // A word-pair load requires the canonical even/odd pair layout.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else if (source.IsRegisterPair()) {
      __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                 source.AsRegisterPairLow<Register>(),
                 source.AsRegisterPairHigh<Register>());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1_R2 is the one pair with an odd low register (see
        // ExpectedPairLayout), so it cannot use a word-pair store; store
        // the two words individually instead.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      // Stack-to-stack: move the two words through the parallel move resolver.
      DCHECK(source.IsDoubleStackSlot());
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
959
// Moves the value produced by `instruction` into `location`, on behalf of the
// consuming instruction `move_for`. Dispatches on how the value is available:
// current method, already-placed output, constant, local slot, temporary, or
// the instruction's own output location.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current method is spilled at a fixed offset in the frame.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // The value already lives in the requested location; nothing to do.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Materialize the constant directly into the target location.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Immediates cannot be stored directly; stage through the scratch IP.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store the two 32-bit halves one word at a time via IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Load the value straight out of the local's stack slot, sized by type.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    // Temporaries live in dedicated stack slots; slot width picks the move.
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: the producing instruction must immediately precede the
    // consumer (possibly separated only by temporaries).
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
1043
// Loads a 32-bit immediate into the core register designated by `location`.
void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadImmediate(location.AsRegister<Register>(), value);
}
1048
1049void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
1050  if (Primitive::Is64BitType(dst_type)) {
1051    Move64(dst, src);
1052  } else {
1053    Move32(dst, src);
1054  }
1055}
1056
// Registers `location` as temp(s) in `locations`: a single register becomes
// one temp, a register pair becomes two single-register temps.
void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else if (location.IsRegisterPair()) {
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}
1067
// Calls a quick runtime entrypoint: translates the entrypoint enum into its
// Thread offset and delegates to the offset-based overload below.
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}
1077
// Calls a quick runtime entrypoint given its offset in the Thread object:
// loads the entrypoint address from TR, calls it via LR, and records the PC
// so a stack map exists for the runtime call.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
}
1087
// Emits the control transfer for an unconditional goto (or try boundary),
// inserting suspend checks on loop back edges and after the entry block.
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Back edge with a suspend check: the suspend check slow path performs
    // the branch to the successor itself, so we return after emitting it.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // The entry block's trailing suspend check is emitted here, just before
  // the branch into the method body.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // Elide the branch when the successor is laid out immediately after us.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
1108
// HGoto uses no registers.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}
1116
// HTryBoundary uses no registers.
void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}
1120
1121void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
1122  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
1123  if (!successor->IsExitBlock()) {
1124    HandleGoto(try_boundary, successor);
1125  }
1126}
1127
// HExit uses no registers and generates no code.
void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
1134
1135void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
1136  ShifterOperand operand;
1137  if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
1138    __ cmp(left, operand);
1139  } else {
1140    Register temp = IP;
1141    __ LoadImmediate(temp, right);
1142    __ cmp(left, ShifterOperand(temp));
1143  }
1144}
1145
// Emits the branches for a floating-point condition whose VFP compare has
// already been issued: transfers the FP flags to the APSR, dispatches the
// NaN (unordered) case explicitly, then branches on the ordered condition.
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
                                                  Label* true_label,
                                                  Label* false_label) {
  __ vmstat();  // transfer FP status register to ARM APSR.
  // TODO: merge into a single branch (except "equal or unordered" and "not equal")
  if (cond->IsFPConditionTrueIfNaN()) {
    __ b(true_label, VS);  // VS for unordered.
  } else if (cond->IsFPConditionFalseIfNaN()) {
    __ b(false_label, VS);  // VS for unordered.
  }
  __ b(true_label, ARMCondition(cond->GetCondition()));
}
1158
// Materializes a 64-bit compare-and-branch as a compare of the high words
// (deciding most conditions) followed by an unsigned compare of the low
// words when the high words are equal.
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
                                                               Label* true_label,
                                                               Label* false_label) {
  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  IfCondition if_cond = cond->GetCondition();

  Register left_high = left.AsRegisterPairHigh<Register>();
  Register left_low = left.AsRegisterPairLow<Register>();
  IfCondition true_high_cond = if_cond;
  IfCondition false_high_cond = cond->GetOppositeCondition();
  Condition final_condition = ARMUnsignedCondition(if_cond);  // unsigned on lower part

  // Set the conditions for the test, remembering that == needs to be
  // decided using the low words.
  // TODO: consider avoiding jumps with temporary and CMP low+SBC high
  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      // Nothing to do.
      break;
    case kCondLT:
      false_high_cond = kCondGT;
      break;
    case kCondLE:
      true_high_cond = kCondLT;
      break;
    case kCondGT:
      false_high_cond = kCondLT;
      break;
    case kCondGE:
      true_high_cond = kCondGT;
      break;
    case kCondB:
      false_high_cond = kCondA;
      break;
    case kCondBE:
      true_high_cond = kCondB;
      break;
    case kCondA:
      false_high_cond = kCondB;
      break;
    case kCondAE:
      true_high_cond = kCondA;
      break;
  }
  if (right.IsConstant()) {
    // Constant right-hand side: compare each half against its immediate.
    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
    int32_t val_low = Low32Bits(value);
    int32_t val_high = High32Bits(value);

    GenerateCompareWithImmediate(left_high, val_high);
    if (if_cond == kCondNE) {
      // Any high-word difference already decides NE.
      __ b(true_label, ARMCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      // Any high-word difference already decides EQ (as false).
      __ b(false_label, ARMCondition(false_high_cond));
    } else {
      __ b(true_label, ARMCondition(true_high_cond));
      __ b(false_label, ARMCondition(false_high_cond));
    }
    // Must be equal high, so compare the lows.
    GenerateCompareWithImmediate(left_low, val_low);
  } else {
    Register right_high = right.AsRegisterPairHigh<Register>();
    Register right_low = right.AsRegisterPairLow<Register>();

    __ cmp(left_high, ShifterOperand(right_high));
    if (if_cond == kCondNE) {
      __ b(true_label, ARMCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      __ b(false_label, ARMCondition(false_high_cond));
    } else {
      __ b(true_label, ARMCondition(true_high_cond));
      __ b(false_label, ARMCondition(false_high_cond));
    }
    // Must be equal high, so compare the lows.
    __ cmp(left_low, ShifterOperand(right_low));
  }
  // The last comparison might be unsigned.
  // TODO: optimize cases where this is always true/false
  __ b(true_label, final_condition);
}
1242
// Emits both the comparison and the branches for a long or floating-point
// condition that was folded into the HIf (i.e. not materialized).
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
                                                               HCondition* condition,
                                                               Label* true_target,
                                                               Label* false_target,
                                                               Label* always_true_target) {
  LocationSummary* locations = condition->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  // We don't want true_target as a nullptr.
  if (true_target == nullptr) {
    true_target = always_true_target;
  }
  // Remember whether the false successor is the fall-through block; if so we
  // must not emit the trailing unconditional branch.
  bool falls_through = (false_target == nullptr);

  // FP compares don't like null false_targets.
  if (false_target == nullptr) {
    false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  }

  Primitive::Type type = condition->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(condition, true_target, false_target);
      break;
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }

  if (!falls_through) {
    __ b(false_target);
  }
}
1285
// Emits the branch(es) for an HIf or HDeoptimize based on its condition
// input, which may be a compile-time constant, a materialized boolean, or a
// condition whose comparison is generated inline here.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
      // Falls through to the unconditional branch to false_target below.
    }
  } else {
    // Can we optimize the jump if we know that the next block is the true case?
    HCondition* condition = cond->AsCondition();
    bool can_jump_to_false = CanReverseCondition(always_true_target, false_target, condition);
    if (condition == nullptr || condition->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0.
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      if (can_jump_to_false) {
        __ CompareAndBranchIfZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
                                  false_target);
        return;
      }
      __ CompareAndBranchIfNonZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
                                   true_target);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      Primitive::Type type = (condition != nullptr)
          ? cond->InputAt(0)->GetType()
          : Primitive::kPrimInt;
      // Is this a long or FP comparison that has been folded into the HCondition?
      if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
        // Generate the comparison directly.
        GenerateCompareTestAndBranch(instruction->AsIf(), condition,
                                     true_target, false_target, always_true_target);
        return;
      }

      // Integer case: compare the inputs, then branch on the condition flags.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      Location right = locations->InAt(1);
      if (right.IsRegister()) {
        __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      if (can_jump_to_false) {
        // Branch on the reversed condition so the true case falls through.
        __ b(false_target, ARMCondition(condition->GetOppositeCondition()));
        return;
      }

      __ b(true_target, ARMCondition(condition->GetCondition()));
    }
  }
  // Unconditional branch to the false successor, unless it falls through.
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
1352
1353void LocationsBuilderARM::VisitIf(HIf* if_instr) {
1354  LocationSummary* locations =
1355      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
1356  HInstruction* cond = if_instr->InputAt(0);
1357  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1358    locations->SetInAt(0, Location::RequiresRegister());
1359  }
1360}
1361
1362void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1363  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1364  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1365  Label* always_true_target = true_target;
1366  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1367                                if_instr->IfTrueSuccessor())) {
1368    always_true_target = nullptr;
1369  }
1370  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1371                                if_instr->IfFalseSuccessor())) {
1372    false_target = nullptr;
1373  }
1374  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1375}
1376
1377void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1378  LocationSummary* locations = new (GetGraph()->GetArena())
1379      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1380  HInstruction* cond = deoptimize->InputAt(0);
1381  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1382    locations->SetInAt(0, Location::RequiresRegister());
1383  }
1384}
1385
1386void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1387  SlowPathCode* slow_path = new (GetGraph()->GetArena())
1388      DeoptimizationSlowPathARM(deoptimize);
1389  codegen_->AddSlowPath(slow_path);
1390  Label* slow_path_entry = slow_path->GetEntryLabel();
1391  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1392}
1393
// Allocates locations for a condition based on the type of its operands;
// an output register is only needed when the condition is materialized.
void LocationsBuilderARM::VisitCondition(HCondition* cond) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
  // Handle the long/FP comparisons made in instruction simplification.
  switch (cond->InputAt(0)->GetType()) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
      if (cond->NeedsMaterialization()) {
        // The output may be written before the last input read (in the
        // label-based long sequence), so it must not share a register with
        // the inputs — presumably the reason for the overlap constraint.
        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      }
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
      break;

    default:
      // Integral and reference types.
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
      if (cond->NeedsMaterialization()) {
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
  }
}
1424
// Materializes a condition into its output register (0 or 1). Only emits
// code when the condition could not be folded into its user.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  if (!cond->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  Register out = locations->Out().AsRegister<Register>();
  Label true_label, false_label;

  // Note: the integer case lives in `default` and returns directly; the
  // long/FP cases break out to the label-based materialization below.
  switch (cond->InputAt(0)->GetType()) {
    default: {
      // Integer case.
      if (right.IsRegister()) {
        __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left.AsRegister<Register>(),
                                     CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      // Branch-free materialization with a Thumb-2 IT block: conditionally
      // move 1 (condition true) or 0 (opposite condition).
      __ it(ARMCondition(cond->GetCondition()), kItElse);
      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
             ARMCondition(cond->GetCondition()));
      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
             ARMCondition(cond->GetOppositeCondition()));
      return;
    }
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(cond, &true_label, &false_label);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(cond, &true_label, &false_label);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(cond, &true_label, &false_label);
      break;
  }

  // Convert the jumps into the result.
  Label done_label;

  // False case: result = 0.
  __ Bind(&false_label);
  __ LoadImmediate(out, 0);
  __ b(&done_label);

  // True case: result = 1.
  __ Bind(&true_label);
  __ LoadImmediate(out, 1);
  __ Bind(&done_label);
}
1480
// All concrete condition visitors delegate to the shared VisitCondition
// handlers above.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitBelow(HBelow* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitAbove(HAbove* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
  VisitCondition(comp);
}
1560
// HLocal uses no registers and generates no code; locals only exist in the
// entry block.
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}
1568
// HLoadLocal uses no registers; the value is moved at use sites via Move().
void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}
1576
// Pins the stored value (input 1) to the local's stack slot, sized by type,
// so the store itself needs no code in the instruction visitor.
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
  }
}
1600
// Nothing to do: the input location constraint above places the value.
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
1603
1604void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
1605  LocationSummary* locations =
1606      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1607  locations->SetOut(Location::ConstantLocation(constant));
1608}
1609
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
  // No code here: the constant is emitted at each use site.
}
1613
1614void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
1615  LocationSummary* locations =
1616      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1617  locations->SetOut(Location::ConstantLocation(constant));
1618}
1619
void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
  // No code here: the constant is emitted at each use site.
}
1623
1624void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
1625  LocationSummary* locations =
1626      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1627  locations->SetOut(Location::ConstantLocation(constant));
1628}
1629
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
  // No code here: the constant is emitted at each use site.
}
1633
1634void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
1635  LocationSummary* locations =
1636      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1637  locations->SetOut(Location::ConstantLocation(constant));
1638}
1639
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // No code here: the constant is emitted at each use site.
}
1643
1644void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
1645  LocationSummary* locations =
1646      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1647  locations->SetOut(Location::ConstantLocation(constant));
1648}
1649
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
  // No code here: the constant is emitted at each use site.
}
1653
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // A barrier has no inputs or outputs, so it needs no location summary.
  memory_barrier->SetLocations(nullptr);
}
1657
void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  // Emit the fence matching the barrier kind requested by the HIR node.
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}
1661
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  // No value to return, so no location summary is needed.
  ret->SetLocations(nullptr);
}
1665
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  // Tear down the frame and return to the caller.
  codegen_->GenerateFrameExit();
}
1669
1670void LocationsBuilderARM::VisitReturn(HReturn* ret) {
1671  LocationSummary* locations =
1672      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
1673  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
1674}
1675
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  // The value is already in the return register(s); just exit the frame.
  codegen_->GenerateFrameExit();
}
1679
void LocationsBuilderARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.
  HandleInvoke(invoke);
}
1686
void InstructionCodeGeneratorARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // Unresolved call targets are dispatched through a runtime trampoline.
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}
1690
1691void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1692  // When we do not run baseline, explicit clinit checks triggered by static
1693  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1694  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1695
1696  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1697                                         codegen_->GetAssembler(),
1698                                         codegen_->GetInstructionSetFeatures());
1699  if (intrinsic.TryDispatch(invoke)) {
1700    return;
1701  }
1702
1703  HandleInvoke(invoke);
1704}
1705
1706static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1707  if (invoke->GetLocations()->Intrinsified()) {
1708    IntrinsicCodeGeneratorARM intrinsic(codegen);
1709    intrinsic.Dispatch(invoke);
1710    return true;
1711  }
1712  return false;
1713}
1714
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  // Intrinsified invokes produce inline code instead of a call.
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  // The temp (when present) is used by GenerateStaticOrDirectCall to hold the
  // callee method; record a stack map at the call site.
  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(
      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1729
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  // Set up argument and return locations following the dex calling convention.
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
1734
1735void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1736  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1737                                         codegen_->GetAssembler(),
1738                                         codegen_->GetInstructionSetFeatures());
1739  if (intrinsic.TryDispatch(invoke)) {
1740    return;
1741  }
1742
1743  HandleInvoke(invoke);
1744}
1745
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  // Intrinsified invokes produce inline code instead of a call.
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  // Dispatch through the vtable using the temp set up by HandleInvoke, and
  // record a stack map at the call site.
  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1755
void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument. R12 is loaded with the dex method index at the
  // call site (see the matching code generator visitor).
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
}
1761
// Emits an interface call: look up the target in the receiver class's
// embedded IMT, then branch-and-link through LR.
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.  NOTE(review): temp(1) is presumably the R12
  // temp added in the locations builder — confirm against HandleInvoke.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above is the implicit null check of the receiver.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  __ MaybeUnpoisonHeapReference(temp);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1795
// Builds the location summary for an HNeg; all cases are register-only.
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // Output must not alias the input: the code generator writes out.lo
      // (RSBS) before it reads in.hi (see VisitNeg in the code generator).
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1821
// Emits code for an HNeg.  Integer negation is done with reverse-subtract
// from zero; long negation chains the borrow through SBC/SUB because Thumb-2
// has no RSC instruction; floats use the VFP negate instructions.
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1868
// Builds the location summary for an HTypeConversion.  Most conversions are
// register-to-register operations; float/double-to-long and long-to-float go
// through runtime entrypoints, so their operands are pinned to the runtime
// calling convention.
void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);

  // The float-to-long, double-to-long and long-to-float type conversions
  // rely on a call to the runtime.
  LocationSummary::CallKind call_kind =
      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
        && result_type == Primitive::kPrimLong)
       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  // The Java language does not allow treating boolean as an integral type but
  // our bit representation makes it safe.

  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-int' instruction.
          // The FP temp is scratch for the in-FPU-register vcvt sequence.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-int' instruction.
          // The FP temp is scratch for the in-FPU-register vcvt sequence.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-long' instruction.
          // Runtime call: operands live in the runtime calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterLocation(
              calling_convention.GetFpuRegisterAt(0)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-long' instruction.
          // Runtime call: operands live in the runtime calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::FpuRegisterPairLocation(
              calling_convention.GetFpuRegisterAt(0),
              calling_convention.GetFpuRegisterAt(1)));
          locations->SetOut(Location::RegisterPairLocation(R0, R1));
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-float' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          // Runtime call: operands live in the runtime calling convention.
          InvokeRuntimeCallingConvention calling_convention;
          locations->SetInAt(0, Location::RegisterPairLocation(
              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-double' instruction.
          // Two FP temps: one holds the converted high word, the other the
          // 2^32 scaling constant (see the code generator visitor).
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
2081
// Emits code for an HTypeConversion, mirroring the cases set up in the
// locations builder.  Narrowing integral conversions use bit-field extract;
// FP conversions use the VFP vcvt family; float/double-to-long and
// long-to-float delegate to runtime entrypoints.
void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          // Sign-extend the low 8 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          // Sign-extend the low 16 bits.
          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          // The result is the low word; the input may be in registers, on the
          // stack, or a constant (the builder allowed Location::Any()).
          DCHECK(out.IsRegister());
          if (in.IsRegisterPair()) {
            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          __ vmovs(temp, in.AsFpuRegister<SRegister>());
          __ vcvtis(temp, temp);
          __ vmovrs(out.AsRegister<Register>(), temp);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction.
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          __ vcvtid(temp_s, temp_d);
          __ vmovrs(out.AsRegister<Register>(), temp_s);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          DCHECK(out.IsRegisterPair());
          DCHECK(in.IsRegister());
          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
          // Sign extension.
          __ Asr(out.AsRegisterPairHigh<Register>(),
                 out.AsRegisterPairLow<Register>(),
                 31);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          // Zero-extend the low 16 bits (char is unsigned).
          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-float' instruction.
          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
          break;
        }

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
                                  conversion,
                                  conversion->GetDexPc(),
                                  nullptr);
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ vcvtsd(out.AsFpuRegister<SRegister>(),
                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimBoolean:
          // Boolean input is a result of code transformations.
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar: {
          // Processing a Dex `int-to-double' instruction.
          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    out.AsFpuRegisterPairLow<SRegister>());
          break;
        }

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // result = double(high) * 2^32 + unsigned-double(low).
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
          DRegister out_d = FromLowSToD(out_s);
          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
          DRegister temp_d = FromLowSToD(temp_s);
          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
          DRegister constant_d = FromLowSToD(constant_s);

          // temp_d = int-to-double(high)
          __ vmovsr(temp_s, high);
          __ vcvtdi(temp_d, temp_s);
          // constant_d = k2Pow32EncodingForDouble
          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
          // out_d = unsigned-to-double(low)
          __ vmovsr(out_s, low);
          __ vcvtdu(out_d, out_s);
          // out_d += temp_d * constant_d
          __ vmlad(out_d, temp_d, constant_d);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
                    in.AsFpuRegister<SRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}
2313
// Builds the location summary for an HAdd.  Only the 32-bit integer case can
// take its second operand as a constant.
void LocationsBuilderARM::VisitAdd(HAdd* add) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}
2344
// Emits code for an HAdd.  Long addition chains the carry: ADDS sets the
// carry flag from the low words, ADC consumes it for the high words.
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
  LocationSummary* locations = add->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt:
      if (second.IsRegister()) {
        __ add(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Second operand is a constant (allowed by the locations builder).
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      __ adds(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ adc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat:
      __ vadds(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}
2390
2391void LocationsBuilderARM::VisitSub(HSub* sub) {
2392  LocationSummary* locations =
2393      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2394  switch (sub->GetResultType()) {
2395    case Primitive::kPrimInt: {
2396      locations->SetInAt(0, Location::RequiresRegister());
2397      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2398      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2399      break;
2400    }
2401
2402    case Primitive::kPrimLong: {
2403      locations->SetInAt(0, Location::RequiresRegister());
2404      locations->SetInAt(1, Location::RequiresRegister());
2405      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2406      break;
2407    }
2408    case Primitive::kPrimFloat:
2409    case Primitive::kPrimDouble: {
2410      locations->SetInAt(0, Location::RequiresFpuRegister());
2411      locations->SetInAt(1, Location::RequiresFpuRegister());
2412      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2413      break;
2414    }
2415    default:
2416      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2417  }
2418}
2419
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
  // Emits the subtraction using the registers assigned by
  // LocationsBuilderARM::VisitSub.
  LocationSummary* locations = sub->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ sub(out.AsRegister<Register>(),
               first.AsRegister<Register>(),
               ShifterOperand(second.AsRegister<Register>()));
      } else {
        // Constant RHS: subtract by adding the negated constant.
        __ AddConstant(out.AsRegister<Register>(),
                       first.AsRegister<Register>(),
                       -second.GetConstant()->AsIntConstant()->GetValue());
      }
      break;
    }

    case Primitive::kPrimLong: {
      DCHECK(second.IsRegisterPair());
      // 64-bit subtract: `subs` sets the borrow (carry) flag from the low
      // halves and `sbc` consumes it for the high halves.
      __ subs(out.AsRegisterPairLow<Register>(),
              first.AsRegisterPairLow<Register>(),
              ShifterOperand(second.AsRegisterPairLow<Register>()));
      __ sbc(out.AsRegisterPairHigh<Register>(),
             first.AsRegisterPairHigh<Register>(),
             ShifterOperand(second.AsRegisterPairHigh<Register>()));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vsubs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Map the low S register of each pair to its D-register alias.
      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }


    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}
2469
2470void LocationsBuilderARM::VisitMul(HMul* mul) {
2471  LocationSummary* locations =
2472      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2473  switch (mul->GetResultType()) {
2474    case Primitive::kPrimInt:
2475    case Primitive::kPrimLong:  {
2476      locations->SetInAt(0, Location::RequiresRegister());
2477      locations->SetInAt(1, Location::RequiresRegister());
2478      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2479      break;
2480    }
2481
2482    case Primitive::kPrimFloat:
2483    case Primitive::kPrimDouble: {
2484      locations->SetInAt(0, Location::RequiresFpuRegister());
2485      locations->SetInAt(1, Location::RequiresFpuRegister());
2486      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2487      break;
2488    }
2489
2490    default:
2491      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2492  }
2493}
2494
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  // Emits the multiplication. The 64-bit case builds the product from three
  // 32-bit multiplies (mul/mla/umull), using IP as a scratch register.
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      // out_hi is written (by mla) before in1_lo/in2_lo are last read
      // (by umull), so it must not alias either of them.
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Map the low S register of each pair to its D-register alias.
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2556
2557void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2558  DCHECK(instruction->IsDiv() || instruction->IsRem());
2559  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2560
2561  LocationSummary* locations = instruction->GetLocations();
2562  Location second = locations->InAt(1);
2563  DCHECK(second.IsConstant());
2564
2565  Register out = locations->Out().AsRegister<Register>();
2566  Register dividend = locations->InAt(0).AsRegister<Register>();
2567  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2568  DCHECK(imm == 1 || imm == -1);
2569
2570  if (instruction->IsRem()) {
2571    __ LoadImmediate(out, 0);
2572  } else {
2573    if (imm == 1) {
2574      __ Mov(out, dividend);
2575    } else {
2576      __ rsb(out, dividend, ShifterOperand(0));
2577    }
2578  }
2579}
2580
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  // Emits int div/rem by a power-of-two constant (|imm| >= 2) without a
  // divide instruction. A bias of |imm|-1 is added to negative dividends so
  // that the arithmetic shift rounds toward zero (Java semantics).
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  if (ctz_imm == 1) {
    // Dividing by +/-2: the bias is exactly the sign bit.
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    // temp = (dividend < 0) ? (2^ctz_imm - 1) : 0.
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  // out = dividend + bias.
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      // Negative divisor: negate the quotient.
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    // Remainder: keep the low ctz_imm bits of the biased value, then
    // remove the bias again.
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}
2615
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  // Emits int div/rem by an arbitrary non-zero constant using the
  // multiply-by-magic-number technique (see CalculateMagicAndShiftForDivRem).
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 = high 32 bits of (dividend * magic); temp2 gets the (unused) low
  // 32 bits.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Correct for a sign mismatch between divisor and magic constant.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // out = temp1 - (temp1 >> 31), i.e. add one when temp1 is negative to
    // round the quotient toward zero.
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // Remainder: out = dividend - quotient * imm.
    // TODO: Strength reduction for mls.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2656
2657void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2658  DCHECK(instruction->IsDiv() || instruction->IsRem());
2659  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2660
2661  LocationSummary* locations = instruction->GetLocations();
2662  Location second = locations->InAt(1);
2663  DCHECK(second.IsConstant());
2664
2665  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2666  if (imm == 0) {
2667    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2668  } else if (imm == 1 || imm == -1) {
2669    DivRemOneOrMinusOne(instruction);
2670  } else if (IsPowerOfTwo(std::abs(imm))) {
2671    DivRemByPowerOfTwo(instruction);
2672  } else {
2673    DCHECK(imm <= -2 || imm >= 2);
2674    GenerateDivRemWithAnyConstant(instruction);
2675  }
2676}
2677
void LocationsBuilderARM::VisitDiv(HDiv* div) {
  // Chooses location constraints for HDiv. Long division always calls into
  // the runtime; int division is inlined when the divisor is constant or a
  // hardware divide instruction is available.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if (div->GetResultType() == Primitive::kPrimLong) {
    // pLdiv runtime call.
    call_kind = LocationSummary::kCall;
  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
    // sdiv will be replaced by other instruction sequence.
  } else if (div->GetResultType() == Primitive::kPrimInt &&
             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
    // pIdivmod runtime call.
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (div->InputAt(1)->IsConstant()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
        if (abs_imm <= 1) {
          // No temp register required.
        } else {
          // DivRemByPowerOfTwo needs one temp; GenerateDivRemWithAnyConstant
          // needs a second one.
          locations->AddTemp(Location::RequiresRegister());
          if (!IsPowerOfTwo(abs_imm)) {
            locations->AddTemp(Location::RequiresRegister());
          }
        }
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RequiresRegister());
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      } else {
        // Runtime call: operands go in the runtime calling convention
        // registers.
        InvokeRuntimeCallingConvention calling_convention;
        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
        //       we only need the former.
        locations->SetOut(Location::RegisterLocation(R0));
      }
      break;
    }
    case Primitive::kPrimLong: {
      // Long division is a runtime call taking both operands in register
      // pairs per the calling convention.
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
      locations->SetInAt(1, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
      locations->SetOut(Location::RegisterPairLocation(R0, R1));
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2743
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  // Emits the division set up by LocationsBuilderARM::VisitDiv: inline code
  // for constant or hardware-sdiv int division, runtime calls otherwise.
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // No hardware divide: call the pIdivmod runtime helper. The DCHECKs
        // verify the register allocator honored the calling convention.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      // Long division always goes through the pLdiv runtime helper, with
      // both operands in calling-convention register pairs.
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      // Map the low S register of each pair to its D-register alias.
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2800
2801void LocationsBuilderARM::VisitRem(HRem* rem) {
2802  Primitive::Type type = rem->GetResultType();
2803
2804  // Most remainders are implemented in the runtime.
2805  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2806  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2807    // sdiv will be replaced by other instruction sequence.
2808    call_kind = LocationSummary::kNoCall;
2809  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2810             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2811    // Have hardware divide instruction for int, do it with three instructions.
2812    call_kind = LocationSummary::kNoCall;
2813  }
2814
2815  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2816
2817  switch (type) {
2818    case Primitive::kPrimInt: {
2819      if (rem->InputAt(1)->IsConstant()) {
2820        locations->SetInAt(0, Location::RequiresRegister());
2821        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2822        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2823        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2824        if (abs_imm <= 1) {
2825          // No temp register required.
2826        } else {
2827          locations->AddTemp(Location::RequiresRegister());
2828          if (!IsPowerOfTwo(abs_imm)) {
2829            locations->AddTemp(Location::RequiresRegister());
2830          }
2831        }
2832      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2833        locations->SetInAt(0, Location::RequiresRegister());
2834        locations->SetInAt(1, Location::RequiresRegister());
2835        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2836        locations->AddTemp(Location::RequiresRegister());
2837      } else {
2838        InvokeRuntimeCallingConvention calling_convention;
2839        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2840        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2841        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2842        //       we only need the latter.
2843        locations->SetOut(Location::RegisterLocation(R1));
2844      }
2845      break;
2846    }
2847    case Primitive::kPrimLong: {
2848      InvokeRuntimeCallingConvention calling_convention;
2849      locations->SetInAt(0, Location::RegisterPairLocation(
2850          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2851      locations->SetInAt(1, Location::RegisterPairLocation(
2852          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2853      // The runtime helper puts the output in R2,R3.
2854      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2855      break;
2856    }
2857    case Primitive::kPrimFloat: {
2858      InvokeRuntimeCallingConvention calling_convention;
2859      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2860      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2861      locations->SetOut(Location::FpuRegisterLocation(S0));
2862      break;
2863    }
2864
2865    case Primitive::kPrimDouble: {
2866      InvokeRuntimeCallingConvention calling_convention;
2867      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2868          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2869      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2870          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2871      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2872      break;
2873    }
2874
2875    default:
2876      LOG(FATAL) << "Unexpected rem type " << type;
2877  }
2878}
2879
2880void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2881  LocationSummary* locations = rem->GetLocations();
2882  Location out = locations->Out();
2883  Location first = locations->InAt(0);
2884  Location second = locations->InAt(1);
2885
2886  Primitive::Type type = rem->GetResultType();
2887  switch (type) {
2888    case Primitive::kPrimInt: {
2889        if (second.IsConstant()) {
2890          GenerateDivRemConstantIntegral(rem);
2891        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2892        Register reg1 = first.AsRegister<Register>();
2893        Register reg2 = second.AsRegister<Register>();
2894        Register temp = locations->GetTemp(0).AsRegister<Register>();
2895
2896        // temp = reg1 / reg2  (integer division)
2897        // dest = reg1 - temp * reg2
2898        __ sdiv(temp, reg1, reg2);
2899        __ mls(out.AsRegister<Register>(), temp, reg2, reg1);
2900      } else {
2901        InvokeRuntimeCallingConvention calling_convention;
2902        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2903        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2904        DCHECK_EQ(R1, out.AsRegister<Register>());
2905
2906        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2907      }
2908      break;
2909    }
2910
2911    case Primitive::kPrimLong: {
2912      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2913      break;
2914    }
2915
2916    case Primitive::kPrimFloat: {
2917      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2918      break;
2919    }
2920
2921    case Primitive::kPrimDouble: {
2922      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2923      break;
2924    }
2925
2926    default:
2927      LOG(FATAL) << "Unexpected rem type " << type;
2928  }
2929}
2930
2931void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2932  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2933      ? LocationSummary::kCallOnSlowPath
2934      : LocationSummary::kNoCall;
2935  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2936  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2937  if (instruction->HasUses()) {
2938    locations->SetOut(Location::SameAsFirstInput());
2939  }
2940}
2941
2942void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2943  SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2944  codegen_->AddSlowPath(slow_path);
2945
2946  LocationSummary* locations = instruction->GetLocations();
2947  Location value = locations->InAt(0);
2948
2949  switch (instruction->GetType()) {
2950    case Primitive::kPrimByte:
2951    case Primitive::kPrimChar:
2952    case Primitive::kPrimShort:
2953    case Primitive::kPrimInt: {
2954      if (value.IsRegister()) {
2955        __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
2956      } else {
2957        DCHECK(value.IsConstant()) << value;
2958        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2959          __ b(slow_path->GetEntryLabel());
2960        }
2961      }
2962      break;
2963    }
2964    case Primitive::kPrimLong: {
2965      if (value.IsRegisterPair()) {
2966        __ orrs(IP,
2967                value.AsRegisterPairLow<Register>(),
2968                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2969        __ b(slow_path->GetEntryLabel(), EQ);
2970      } else {
2971        DCHECK(value.IsConstant()) << value;
2972        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2973          __ b(slow_path->GetEntryLabel());
2974        }
2975      }
2976      break;
2977    default:
2978      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2979    }
2980  }
2981}
2982
2983void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2984  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2985
2986  LocationSummary* locations =
2987      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2988
2989  switch (op->GetResultType()) {
2990    case Primitive::kPrimInt: {
2991      locations->SetInAt(0, Location::RequiresRegister());
2992      if (op->InputAt(1)->IsConstant()) {
2993        locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
2994        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2995      } else {
2996        locations->SetInAt(1, Location::RequiresRegister());
2997        // Make the output overlap, as it will be used to hold the masked
2998        // second input.
2999        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3000      }
3001      break;
3002    }
3003    case Primitive::kPrimLong: {
3004      locations->SetInAt(0, Location::RequiresRegister());
3005      if (op->InputAt(1)->IsConstant()) {
3006        locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
3007        // For simplicity, use kOutputOverlap even though we only require that low registers
3008        // don't clash with high registers which the register allocator currently guarantees.
3009        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3010      } else {
3011        locations->SetInAt(1, Location::RequiresRegister());
3012        locations->AddTemp(Location::RequiresRegister());
3013        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3014      }
3015      break;
3016    }
3017    default:
3018      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
3019  }
3020}
3021
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  // Code generation shared by HShl, HShr and HUShr. The shift distance is
  // masked (with kMaxIntShiftValue / kMaxLongShiftValue) to match Java
  // semantics, since the hardware does not do the masking itself.
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // Arm doesn't mask the shift count so we need to do it ourselves.
        // out_reg holds the masked amount; it may not alias first_reg
        // (kOutputOverlap was requested by the locations builder).
        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, out_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, out_reg);
        } else {
          __ Lsr(out_reg, first_reg, out_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      if (second.IsRegister()) {
        // Variable 64-bit shift: combine two 32-bit shifts, with a
        // conditional (IT-predicated) override for distances >= 32.
        Register temp = locations->GetTemp(0).AsRegister<Register>();

        Register second_reg = second.AsRegister<Register>();

        if (op->IsShl()) {
          __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
          // Shift the high part
          __ Lsl(o_h, high, o_l);
          // Shift the low part and `or` what overflew on the high part
          __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
          __ Lsr(temp, low, temp);
          __ orr(o_h, o_h, ShifterOperand(temp));
          // If the shift is > 32 bits, override the high part
          __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
          __ it(PL);
          __ Lsl(o_h, low, temp, PL);
          // Shift the low part
          __ Lsl(o_l, low, o_l);
        } else if (op->IsShr()) {
          __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
          // Shift the low part
          __ Lsr(o_l, low, o_h);
          // Shift the high part and `or` what underflew on the low part
          __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
          __ Lsl(temp, high, temp);
          __ orr(o_l, o_l, ShifterOperand(temp));
          // If the shift is > 32 bits, override the low part
          __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
          __ it(PL);
          __ Asr(o_l, high, temp, PL);
          // Shift the high part
          __ Asr(o_h, high, o_h);
        } else {
          __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
          // same as Shr except we use `Lsr`s and not `Asr`s
          __ Lsr(o_l, low, o_h);
          __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
          __ Lsl(temp, high, temp);
          __ orr(o_l, o_l, ShifterOperand(temp));
          __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
          __ it(PL);
          __ Lsr(o_l, high, temp, PL);
          __ Lsr(o_h, high, o_h);
        }
      } else {
        // Constant 64-bit shift: the distance is known, so pick the exact
        // two-instruction (or three) sequence for >32, ==32 and <32.
        // Register allocator doesn't create partial overlap.
        DCHECK_NE(o_l, high);
        DCHECK_NE(o_h, low);
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxLongShiftValue);
        if (shift_value > 32) {
          if (op->IsShl()) {
            __ Lsl(o_h, low, shift_value - 32);
            __ LoadImmediate(o_l, 0);
          } else if (op->IsShr()) {
            __ Asr(o_l, high, shift_value - 32);
            __ Asr(o_h, high, 31);
          } else {
            __ Lsr(o_l, high, shift_value - 32);
            __ LoadImmediate(o_h, 0);
          }
        } else if (shift_value == 32) {
          if (op->IsShl()) {
            __ mov(o_h, ShifterOperand(low));
            __ LoadImmediate(o_l, 0);
          } else if (op->IsShr()) {
            __ mov(o_l, ShifterOperand(high));
            __ Asr(o_h, high, 31);
          } else {
            __ mov(o_l, ShifterOperand(high));
            __ LoadImmediate(o_h, 0);
          }
        } else {  // shift_value < 32
          if (op->IsShl()) {
            __ Lsl(o_h, high, shift_value);
            __ orr(o_h, o_h, ShifterOperand(low, LSR, 32 - shift_value));
            __ Lsl(o_l, low, shift_value);
          } else if (op->IsShr()) {
            __ Lsr(o_l, low, shift_value);
            __ orr(o_l, o_l, ShifterOperand(high, LSL, 32 - shift_value));
            __ Asr(o_h, high, shift_value);
          } else {
            __ Lsr(o_l, low, shift_value);
            __ orr(o_l, o_l, ShifterOperand(high, LSL, 32 - shift_value));
            __ Lsr(o_h, high, shift_value);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
      UNREACHABLE();
  }
}
3164
// Shift-left: locations are set up by the shared shift handler.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
3168
// Shift-left: code emission is done by the shared shift handler.
void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}
3172
// Arithmetic shift-right: locations are set up by the shared shift handler.
void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
3176
// Arithmetic shift-right: code emission is done by the shared shift handler.
void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}
3180
// Logical shift-right: locations are set up by the shared shift handler.
void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
3184
// Logical shift-right: code emission is done by the shared shift handler.
void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
3188
// NewInstance calls into the runtime, so inputs/outputs follow the runtime
// calling convention; the result (the new object) comes back in R0.
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  // First argument register is a temp: the type index is materialized there
  // at code-generation time (see the instruction visitor below).
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(R0));
}
3197
// Loads the type index into the first runtime-call argument register and
// invokes the allocation entrypoint chosen at graph-building time.
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  // Note: if heap poisoning is enabled, the entry point takes care
  // of poisoning the reference.
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
3208
// NewArray calls into the runtime; arguments (length, method) follow the
// runtime calling convention, and the new array is returned in R0.
void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  // First argument register is a temp: the type index is loaded there later.
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(Location::RegisterLocation(R0));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}
3218
// Loads the type index into the first runtime-call argument register and
// invokes the array-allocation entrypoint.
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
  // Note: if heap poisoning is enabled, the entry point takes care
  // of poisoning the reference.
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
}
3229
3230void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
3231  LocationSummary* locations =
3232      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3233  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
3234  if (location.IsStackSlot()) {
3235    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3236  } else if (location.IsDoubleStackSlot()) {
3237    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3238  }
3239  locations->SetOut(location);
3240}
3241
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
3246
// The current ArtMethod* is pinned to the method register (R0 on ARM).
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
3252
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
3256
// Bitwise NOT: plain register in, register out; no overlap constraint since
// each output word is produced from the corresponding input word.
void LocationsBuilderARM::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
3263
// Emits MVN for 32-bit NOT; 64-bit NOT inverts both halves of the pair.
void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (not_->GetResultType()) {
    case Primitive::kPrimInt:
      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
      break;

    case Primitive::kPrimLong:
      // Low word first, then high word; each MVN reads only its own input word.
      __ mvn(out.AsRegisterPairLow<Register>(),
             ShifterOperand(in.AsRegisterPairLow<Register>()));
      __ mvn(out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}
3284
// Boolean NOT: single register in/out, no overlap constraint needed.
void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
3291
// Booleans are 0 or 1, so XOR with 1 flips the value.
void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
  LocationSummary* locations = bool_not->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
}
3298
// Three-way compare (long/float/double). Int compares are lowered elsewhere.
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      // Output overlaps because it is written before doing the low comparison.
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}
3321
// Materializes the three-way comparison result (-1, 0, or 1) into `out`.
// The emission order is flag-sensitive: the final conditional branches after
// the switch consume the flags set by the last cmp/vmstat in each case.
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // Compare high words first (signed); only if equal does the unsigned
      // low-word comparison below decide the result.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // NaN operands (unordered) resolve per the instruction's bias.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  __ b(&done, EQ);
  __ b(&less, LO);  // LO is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
3370
// Phis are resolved by the register allocator; any location is acceptable.
void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}
3379
// Phis generate no code: the register allocator eliminates them via moves.
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}
3383
3384void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
3385  // TODO (ported from quick): revisit Arm barrier kinds
3386  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
3387  switch (kind) {
3388    case MemBarrierKind::kAnyStore:
3389    case MemBarrierKind::kLoadAny:
3390    case MemBarrierKind::kAnyAny: {
3391      flavor = DmbOptions::ISH;
3392      break;
3393    }
3394    case MemBarrierKind::kStoreStore: {
3395      flavor = DmbOptions::ISHST;
3396      break;
3397    }
3398    default:
3399      LOG(FATAL) << "Unexpected memory barrier " << kind;
3400  }
3401  __ dmb(flavor);
3402}
3403
// Emits an atomic 64-bit load via LDREXD into (out_lo, out_hi).
// When `offset` is non-zero the effective address is formed in IP, using
// `out_lo` as a scratch register for the offset — it is safe to clobber
// because the load immediately overwrites it.
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  if (offset != 0) {
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
3415
// Emits an atomic 64-bit store of (value_lo, value_hi) using an
// LDREXD/STREXD retry loop; `temp1`/`temp2` hold the discarded loaded value
// and the STREXD status flag. The loop retries until the exclusive store
// succeeds (STREXD writes 0 into temp1).
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  Label fail;
  if (offset != 0) {
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  __ strexd(temp1, value_lo, value_hi, addr);
  __ CompareAndBranchIfNonZero(temp1, &fail);
}
3437
// Sets up locations for instance/static field stores. Extra temps are needed
// either for the GC write barrier (reference stores) or for the
// LDREXD/STREXD sequence used by volatile wide stores on cores without
// single-copy-atomic LDRD/STRD.
void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  Primitive::Type field_type = field_info.GetFieldType();
  if (Primitive::IsFloatingPointType(field_type)) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }

  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
  bool generate_volatile = field_info.IsVolatile()
      && is_wide
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
  // Temporary registers for the write barrier.
  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
  if (needs_write_barrier) {
    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
    locations->AddTemp(Location::RequiresRegister());
  } else if (generate_volatile) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());

    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
    if (field_type == Primitive::kPrimDouble) {
      // For doubles we need two more registers to copy the value.
      locations->AddTemp(Location::RegisterLocation(R2));
      locations->AddTemp(Location::RegisterLocation(R3));
    }
  }
}
3480
// Emits the store for an instance/static field set. Volatile stores are
// bracketed by memory barriers; wide volatile stores on cores without
// single-copy-atomic LDRD/STRD use the LDREXD/STREXD loop; reference stores
// are poisoned when heap poisoning is enabled and marked in the card table.
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info,
                                                 bool value_can_be_null) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);

  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));

  if (is_volatile) {
    // Barrier before the store: orders the store after preceding accesses.
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (kPoisonHeapReferences && needs_write_barrier) {
        // Note that in the case where `value` is a null reference,
        // we do not enter this block, as a null reference does not
        // need poisoning.
        DCHECK_EQ(field_type, Primitive::kPrimNot);
        Register temp = locations->GetTemp(0).AsRegister<Register>();
        // Poison a copy so `value` stays usable for the card marking below.
        __ Mov(temp, value.AsRegister<Register>());
        __ PoisonHeapReference(temp);
        __ StoreToOffset(kStoreWord, temp, base, offset);
      } else {
        __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicStore(base, offset,
                                value.AsRegisterPairLow<Register>(),
                                value.AsRegisterPairHigh<Register>(),
                                locations->GetTemp(0).AsRegister<Register>(),
                                locations->GetTemp(1).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();

        // Move the FP value into core registers for the exclusive store loop.
        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);

        GenerateWideAtomicStore(base, offset,
                                value_reg_lo,
                                value_reg_hi,
                                locations->GetTemp(2).AsRegister<Register>(),
                                locations->GetTemp(3).AsRegister<Register>(),
                                instruction);
      } else {
        __ StoreDToOffset(value_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs and doubles are handled in the switch.
  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(
        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
  }

  if (is_volatile) {
    // Barrier after the store: orders the store before subsequent accesses.
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}
3593
// Sets up locations for instance/static field loads. Volatile doubles on
// cores without single-copy-atomic LDRD need two core temps for the LDREXD
// path; volatile longs need an overlapping output because the wide atomic
// load may use the low output register as an address scratch.
void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}
3622
3623Location LocationsBuilderARM::ArmEncodableConstantOrRegister(HInstruction* constant,
3624                                                             Opcode opcode) {
3625  DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
3626  if (constant->IsConstant() &&
3627      CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
3628    return Location::ConstantLocation(constant->AsConstant());
3629  }
3630  return Location::RequiresRegister();
3631}
3632
3633bool LocationsBuilderARM::CanEncodeConstantAsImmediate(HConstant* input_cst,
3634                                                       Opcode opcode) {
3635  uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
3636  if (Primitive::Is64BitType(input_cst->GetType())) {
3637    return CanEncodeConstantAsImmediate(Low32Bits(value), opcode) &&
3638        CanEncodeConstantAsImmediate(High32Bits(value), opcode);
3639  } else {
3640    return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
3641  }
3642}
3643
3644bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode) {
3645  ShifterOperand so;
3646  ArmAssembler* assembler = codegen_->GetAssembler();
3647  if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, &so)) {
3648    return true;
3649  }
3650  Opcode neg_opcode = kNoOperand;
3651  switch (opcode) {
3652    case AND:
3653      neg_opcode = BIC;
3654      break;
3655    case ORR:
3656      neg_opcode = ORN;
3657      break;
3658    default:
3659      return false;
3660  }
3661  return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, ~value, &so);
3662}
3663
// Emits the load for an instance/static field get. Volatile wide loads on
// cores without single-copy-atomic LDRD use LDREXD; volatile loads are
// followed by a LoadAny barrier; reference results are unpoisoned when heap
// poisoning is enabled.
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Load atomically into core temps, then move into the FP register.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    // Barrier after the load: orders the load before subsequent accesses.
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }

  if (field_type == Primitive::kPrimNot) {
    __ MaybeUnpoisonHeapReference(out.AsRegister<Register>());
  }
}
3752
// Instance field set: locations come from the shared field-set handler.
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3756
// Instance field set: emission is done by the shared field-set handler.
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3760
// Instance field get: locations come from the shared field-get handler.
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3764
// Instance field get: emission is done by the shared field-get handler.
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3768
// Static field get: locations come from the shared field-get handler.
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3772
// Static field get: emission is done by the shared field-get handler.
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3776
// Static field set: locations come from the shared field-set handler.
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3780
// Static field set: emission is done by the shared field-set handler.
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3784
// Unresolved field accesses go through the runtime, so locations follow the
// field-access calling convention.
void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3791
// Delegates to the shared runtime-call emitter for unresolved field accesses.
void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}
3801
// Unresolved field accesses go through the runtime, so locations follow the
// field-access calling convention.
void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3808
// Delegates to the shared runtime-call emitter for unresolved field accesses.
void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}
3818
// Unresolved field accesses go through the runtime, so locations follow the
// field-access calling convention.
void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3825
// Delegates to the shared runtime-call emitter for unresolved field accesses.
void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}
3835
// Unresolved field accesses go through the runtime, so locations follow the
// field-access calling convention.
void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}
3842
// Delegates to the shared runtime-call emitter for unresolved field accesses.
void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionARM calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}
3852
// Null check: a slow-path call is needed only when the exception can be
// caught inside this method.
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    // The checked reference is also the result, so keep it in the same place.
    locations->SetOut(Location::SameAsFirstInput());
  }
}
3863
// Implicit null check: load from offset 0 of the object into IP and record
// the PC; a null object makes the load fault at a known PC. Skipped when the
// check can be folded into a subsequent user access.
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3873
3874void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3875  SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3876  codegen_->AddSlowPath(slow_path);
3877
3878  LocationSummary* locations = instruction->GetLocations();
3879  Location obj = locations->InAt(0);
3880
3881  __ CompareAndBranchIfZero(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
3882}
3883
3884void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3885  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
3886    GenerateImplicitNullCheck(instruction);
3887  } else {
3888    GenerateExplicitNullCheck(instruction);
3889  }
3890}
3891
3892void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3893  LocationSummary* locations =
3894      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3895  locations->SetInAt(0, Location::RequiresRegister());
3896  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3897  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3898    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3899  } else {
3900    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3901  }
3902}
3903
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  // Emits an array element load. The addressing scheme is the same for all
  // element types: with a constant index, the scaled index is folded into
  // the load offset; with a register index, the scaled index is first added
  // to the array base into IP and the load uses the data offset.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
                    "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      // Long elements load a register pair; kLoadWordPair is given the low
      // register of the pair.
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      // Double elements load into a D register addressed via the low S
      // register of the FPU pair.
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  // The element load emitted above may double as the implicit null check of
  // the array reference.
  codegen_->MaybeRecordImplicitNullCheck(instruction);

  if (type == Primitive::kPrimNot) {
    Register out = locations->Out().AsRegister<Register>();
    // With heap reference poisoning enabled, loaded references must be
    // unpoisoned before use.
    __ MaybeUnpoisonHeapReference(out);
  }
}
4037
4038void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
4039  Primitive::Type value_type = instruction->GetComponentType();
4040
4041  bool needs_write_barrier =
4042      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
4043  bool may_need_runtime_call = instruction->NeedsTypeCheck();
4044
4045  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4046      instruction,
4047      may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
4048  locations->SetInAt(0, Location::RequiresRegister());
4049  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
4050  if (Primitive::IsFloatingPointType(value_type)) {
4051    locations->SetInAt(2, Location::RequiresFpuRegister());
4052  } else {
4053    locations->SetInAt(2, Location::RequiresRegister());
4054  }
4055
4056  if (needs_write_barrier) {
4057    // Temporary registers for the write barrier.
4058    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
4059    locations->AddTemp(Location::RequiresRegister());
4060  }
4061}
4062
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
  // Emits an array element store. Primitive types are a plain (possibly
  // scaled-index) store; reference stores additionally handle the runtime
  // type check (slow path), heap reference poisoning and the GC write
  // barrier.
  LocationSummary* locations = instruction->GetLocations();
  Register array = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool may_need_runtime_call = locations->CanCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, array, offset);
      } else {
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>()));
        __ StoreToOffset(kStoreByte, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, array, offset);
      } else {
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimNot: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      // `source` is what actually gets stored; it is redirected to a temp
      // when the reference must be poisoned first.
      Register source = value;

      if (instruction->InputAt(2)->IsNullConstant()) {
        // Just setting null: no type check, no poisoning, no write barrier.
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, source, array, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
          __ StoreToOffset(kStoreWord, source, IP, data_offset);
        }
        break;
      }

      // Storing a non-null reference always needs the write barrier (and its
      // two temps).
      DCHECK(needs_write_barrier);
      Register temp1 = locations->GetTemp(0).AsRegister<Register>();
      Register temp2 = locations->GetTemp(1).AsRegister<Register>();
      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
      Label done;
      SlowPathCode* slow_path = nullptr;

      if (may_need_runtime_call) {
        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM(instruction);
        codegen_->AddSlowPath(slow_path);
        if (instruction->GetValueCanBeNull()) {
          // A null value always passes the type check: store it directly and
          // skip the check (and the write barrier path) via `done`.
          Label non_zero;
          __ CompareAndBranchIfNonZero(value, &non_zero);
          if (index.IsConstant()) {
            size_t offset =
               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
            __ StoreToOffset(kStoreWord, value, array, offset);
          } else {
            DCHECK(index.IsRegister()) << index;
            __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
            __ StoreToOffset(kStoreWord, value, IP, data_offset);
          }
          codegen_->MaybeRecordImplicitNullCheck(instruction);
          __ b(&done);
          __ Bind(&non_zero);
        }

        // Type check: compare the array's component type with the value's
        // class; on mismatch, try the Object[] fast path or fall back to the
        // slow path.
        __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ MaybeUnpoisonHeapReference(temp1);
        __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
        __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
        // No need to poison/unpoison, we're comparing two poisoned references.
        __ cmp(temp1, ShifterOperand(temp2));
        if (instruction->StaticTypeOfArrayIsObjectArray()) {
          // If the component type's superclass is null, the component type is
          // java.lang.Object and any reference may be stored.
          Label do_put;
          __ b(&do_put, EQ);
          __ MaybeUnpoisonHeapReference(temp1);
          __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
          // No need to poison/unpoison, we're comparing against null.
          __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
          __ Bind(&do_put);
        } else {
          __ b(slow_path->GetEntryLabel(), NE);
        }
      }

      if (kPoisonHeapReferences) {
        // Note that in the case where `value` is a null reference,
        // we do not enter this block, as a null reference does not
        // need poisoning.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        __ Mov(temp1, value);
        __ PoisonHeapReference(temp1);
        source = temp1;
      }

      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreToOffset(kStoreWord, source, array, offset);
      } else {
        DCHECK(index.IsRegister()) << index;
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreToOffset(kStoreWord, source, IP, data_offset);
      }

      if (!may_need_runtime_call) {
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }

      // Dirty the card covering `array` so the GC sees the new reference.
      codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());

      if (done.IsLinked()) {
        __ Bind(&done);
      }

      if (slow_path != nullptr) {
        __ Bind(slow_path->GetExitLabel());
      }

      break;
    }

    case Primitive::kPrimInt: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register value = locations->InAt(2).AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreToOffset(kStoreWord, value, array, offset);
      } else {
        DCHECK(index.IsRegister()) << index;
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreToOffset(kStoreWord, value, IP, data_offset);
      }

      codegen_->MaybeRecordImplicitNullCheck(instruction);
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location value = locations->InAt(2);
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), array, offset);
      } else {
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), array, offset);
      } else {
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location value = locations->InAt(2);
      DCHECK(value.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), array, offset);
      } else {
        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }

      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << value_type;
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
4278
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
  // One core register in (array reference), one core register out (length);
  // no runtime call is ever needed.
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
4285
4286void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
4287  LocationSummary* locations = instruction->GetLocations();
4288  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
4289  Register obj = locations->InAt(0).AsRegister<Register>();
4290  Register out = locations->Out().AsRegister<Register>();
4291  __ LoadFromOffset(kLoadWord, out, obj, offset);
4292  codegen_->MaybeRecordImplicitNullCheck(instruction);
4293}
4294
4295void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
4296  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
4297      ? LocationSummary::kCallOnSlowPath
4298      : LocationSummary::kNoCall;
4299  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4300  locations->SetInAt(0, Location::RequiresRegister());
4301  locations->SetInAt(1, Location::RequiresRegister());
4302  if (instruction->HasUses()) {
4303    locations->SetOut(Location::SameAsFirstInput());
4304  }
4305}
4306
4307void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
4308  LocationSummary* locations = instruction->GetLocations();
4309  SlowPathCode* slow_path =
4310      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
4311  codegen_->AddSlowPath(slow_path);
4312
4313  Register index = locations->InAt(0).AsRegister<Register>();
4314  Register length = locations->InAt(1).AsRegister<Register>();
4315
4316  __ cmp(index, ShifterOperand(length));
4317  __ b(slow_path->GetEntryLabel(), HS);
4318}
4319
// Marks the card-table entry covering `object` as dirty after a reference
// store. `temp` and `card` are scratch registers. When `can_be_null` is
// true, a null `value` skips the marking entirely.
void CodeGeneratorARM::MarkGCCard(Register temp,
                                  Register card,
                                  Register object,
                                  Register value,
                                  bool can_be_null) {
  Label is_null;
  if (can_be_null) {
    __ CompareAndBranchIfZero(value, &is_null);
  }
  // Load the card-table base from the current thread (TR).
  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
  // Index of the card covering `object`.
  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
  // Store the low byte of the `card` register (the table base) into the
  // card entry, avoiding a separate constant load — this presumably relies
  // on a runtime invariant that the base's low byte equals the dirty-card
  // value; confirm against the CardTable implementation.
  __ strb(card, Address(card, temp));
  if (can_be_null) {
    __ Bind(&is_null);
  }
}
4336
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // Temporaries carry no location summary of their own.
  temp->SetLocations(nullptr);
}
4340
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator: a temporary emits
  // no code of its own.
}
4344
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  // Parallel moves are created after register allocation, so the locations
  // builder must never see one.
  LOG(FATAL) << "Unreachable";
}
4348
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Delegate to the move resolver, which orders the moves and emits them.
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
4352
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // A suspend check only ever calls into the runtime via its slow path; it
  // has no register inputs or outputs.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
4356
4357void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
4358  HBasicBlock* block = instruction->GetBlock();
4359  if (block->GetLoopInformation() != nullptr) {
4360    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
4361    // The back edge will generate the suspend check.
4362    return;
4363  }
4364  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
4365    // The goto will generate the suspend check.
4366    return;
4367  }
4368  GenerateSuspendCheck(instruction, nullptr);
4369}
4370
// Emits a test of the thread's flags halfword; when any flag is set, control
// transfers to the suspend-check slow path. `successor` is null for a
// standalone check (fall through on no flags) and non-null for a back-edge
// check (branch to the loop header on no flags).
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // The slow path is cached on the instruction so a check reached from
  // several sites reuses one slow path with a consistent successor.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  // Load the 16-bit thread-flags field from the current thread (TR).
  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  if (successor == nullptr) {
    // Standalone check: fall through when no flags are set.
    __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    // Back-edge check: branch to the successor when no flags are set.
    __ CompareAndBranchIfZero(IP, codegen_->GetLabelOf(successor));
    __ b(slow_path->GetEntryLabel());
  }
}
4397
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  // The move resolver emits code through the code generator's assembler.
  return codegen_->GetAssembler();
}
4401
// Emits one move of the parallel move: dispatches on the (source,
// destination) location kinds — core register, FPU register, (double) stack
// slot, register pair, FPU pair, or constant. IP and DTMP serve as scratch
// for memory-to-memory transfers.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  MoveOperands* move = moves_[index];
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      // Stack-to-stack word move goes through IP.
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsDoubleStackSlot()) {
      // 64-bit stack-to-stack move goes through the scratch D register.
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant sources are materialized with immediate loads; 64-bit
    // constants headed to the stack are stored as two words through IP.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
4516
// Swaps a core register with an SP-relative stack slot, using IP as scratch.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);                           // IP <- reg.
  __ LoadFromOffset(kLoadWord, reg, SP, mem);   // reg <- [SP + mem].
  __ StoreToOffset(kStoreWord, IP, SP, mem);    // [SP + mem] <- IP.
}
4522
// Swaps two SP-relative stack slots. IP holds one value; a second scratch
// core register is obtained from the resolver (it may have to be spilled,
// which pushes a word and shifts both slot offsets by kArmWordSize).
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
4533
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  // Swaps the source and destination of `moves_[index]`, using IP (core
  // scratch) and DTMP (FP scratch) so no allocatable register is clobbered.
  MoveOperands* move = moves_[index];
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core <-> core: classic three-mov swap through IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    // Core <-> stack slot.
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // Single-precision FP <-> FP: swap through the core scratch register IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core pair <-> core pair: park the source pair in DTMP while the
    // destination pair is copied over, then restore it from DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core pair <-> double stack slot.
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    // The pair must be laid out as (even, even+1) so `low_reg`/`low_reg + 1`
    // can round-trip through DTMP and be reloaded with a single ldrd.
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // Double-precision FP <-> FP: swap through DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // Double-precision FP <-> double stack slot, via DTMP.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // Single-precision FP <-> stack slot, via IP.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // Double stack slot <-> double stack slot: swap the two words separately.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
4606
4607void ParallelMoveResolverARM::SpillScratch(int reg) {
4608  __ Push(static_cast<Register>(reg));
4609}
4610
4611void ParallelMoveResolverARM::RestoreScratch(int reg) {
4612  __ Pop(static_cast<Register>(reg));
4613}
4614
4615void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
4616  InvokeRuntimeCallingConvention calling_convention;
4617  CodeGenerator::CreateLoadClassLocationSummary(
4618      cls,
4619      Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
4620      Location::RegisterLocation(R0));
4621}
4622
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  if (cls->NeedsAccessCheck()) {
    // Let the runtime resolve the type; it also performs the access check.
    codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
                            cls,
                            cls->GetDexPc(),
                            nullptr);
    return;
  }

  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    // The requested class is the declaring class of the current method, so it
    // can be loaded straight from the ArtMethod without a runtime call.
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // out = current_method->dex_cache_resolved_types_[type_index]
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.

    // A null cache entry means the class is unresolved: branch to the slow
    // path, which resolves it (and runs <clinit> when required).
    SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      // The initialization check binds the slow path's exit label itself.
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
4661
4662void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4663  LocationSummary* locations =
4664      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4665  locations->SetInAt(0, Location::RequiresRegister());
4666  if (check->HasUses()) {
4667    locations->SetOut(Location::SameAsFirstInput());
4668  }
4669}
4670
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  // The slow path (shared with HLoadClass) runs the static initializer;
  // `true` requests the clinit check on the slow path as well.
  SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
      check->GetLoadClass(), check, check->GetDexPc(), true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<Register>());
}
4679
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCode* slow_path, Register class_reg) {
  // Load the class status and branch to the slow path if it is below
  // kStatusInitialized (signed compare), i.e. initialization is still needed.
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4690
4691void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4692  LocationSummary* locations =
4693      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4694  locations->SetInAt(0, Location::RequiresRegister());
4695  locations->SetOut(Location::RequiresRegister());
4696}
4697
void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
  // The slow path resolves the string through the runtime when the dex-cache
  // entry is still null.
  SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  // out = current_method->declaring_class_->dex_cache_strings_[string_index]
  __ LoadFromOffset(
      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
  __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}
4713
4714static int32_t GetExceptionTlsOffset() {
4715  return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
4716}
4717
4718void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4719  LocationSummary* locations =
4720      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4721  locations->SetOut(Location::RequiresRegister());
4722}
4723
4724void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
4725  Register out = load->GetLocations()->Out().AsRegister<Register>();
4726  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
4727}
4728
void LocationsBuilderARM::VisitClearException(HClearException* clear) {
  // No inputs, outputs or temps: the instruction only writes thread-local
  // state via the scratch register.
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}
4732
void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  // Store null into the current thread's pending-exception field.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
}
4737
4738void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4739  LocationSummary* locations =
4740      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4741  InvokeRuntimeCallingConvention calling_convention;
4742  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4743}
4744
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  // Hand the exception object to the runtime for delivery.
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4749
4750void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
4751  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
4752  switch (instruction->GetTypeCheckKind()) {
4753    case TypeCheckKind::kExactCheck:
4754    case TypeCheckKind::kAbstractClassCheck:
4755    case TypeCheckKind::kClassHierarchyCheck:
4756    case TypeCheckKind::kArrayObjectCheck:
4757      call_kind = LocationSummary::kNoCall;
4758      break;
4759    case TypeCheckKind::kUnresolvedCheck:
4760    case TypeCheckKind::kInterfaceCheck:
4761      call_kind = LocationSummary::kCall;
4762      break;
4763    case TypeCheckKind::kArrayCheck:
4764      call_kind = LocationSummary::kCallOnSlowPath;
4765      break;
4766  }
4767  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4768  if (call_kind != LocationSummary::kCall) {
4769    locations->SetInAt(0, Location::RequiresRegister());
4770    locations->SetInAt(1, Location::RequiresRegister());
4771    // The out register is used as a temporary, so it overlaps with the inputs.
4772    // Note that TypeCheckSlowPathARM uses this register too.
4773    locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
4774  } else {
4775    InvokeRuntimeCallingConvention calling_convention;
4776    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4777    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
4778    locations->SetOut(Location::RegisterLocation(R0));
4779  }
4780}
4781
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
  // `zero` is the "result is 0" join point; `done` skips the zero-store once
  // `out` already holds the result.
  Label done, zero;
  SlowPathCode* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // Avoid the null check if we know `obj` is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }

  // In case of an interface/unresolved check, we put the object class into the object register.
  // This is safe, as the register is caller-save, and the object must be in another
  // register if it survives the runtime call.
  Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
      (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
      ? obj
      : out;
  __ LoadFromOffset(kLoadWord, target, obj, class_offset);
  __ MaybeUnpoisonHeapReference(target);

  switch (instruction->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck: {
      __ cmp(out, ShifterOperand(cls));
      // Classes must be equal for the instanceof to succeed.
      __ b(&zero, NE);
      __ LoadImmediate(out, 1);
      __ b(&done);
      break;
    }
    case TypeCheckKind::kAbstractClassCheck: {
      // If the class is abstract, we eagerly fetch the super class of the
      // object to avoid doing a comparison we know will fail.
      Label loop;
      __ Bind(&loop);
      __ LoadFromOffset(kLoadWord, out, out, super_offset);
      __ MaybeUnpoisonHeapReference(out);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ CompareAndBranchIfZero(out, &done);
      __ cmp(out, ShifterOperand(cls));
      __ b(&loop, NE);
      __ LoadImmediate(out, 1);
      // Only emit the jump when `zero` is actually used; otherwise execution
      // falls straight through to `done`.
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
    case TypeCheckKind::kClassHierarchyCheck: {
      // Walk over the class hierarchy to find a match.
      Label loop, success;
      __ Bind(&loop);
      __ cmp(out, ShifterOperand(cls));
      __ b(&success, EQ);
      __ LoadFromOffset(kLoadWord, out, out, super_offset);
      __ MaybeUnpoisonHeapReference(out);
      __ CompareAndBranchIfNonZero(out, &loop);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ b(&done);
      __ Bind(&success);
      __ LoadImmediate(out, 1);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
    case TypeCheckKind::kArrayObjectCheck: {
      // Do an exact check.
      Label exact_check;
      __ cmp(out, ShifterOperand(cls));
      __ b(&exact_check, EQ);
      // Otherwise, we need to check that the object's class is a non primitive array.
      __ LoadFromOffset(kLoadWord, out, out, component_offset);
      __ MaybeUnpoisonHeapReference(out);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ CompareAndBranchIfZero(out, &done);
      __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
      __ CompareAndBranchIfNonZero(out, &zero);
      __ Bind(&exact_check);
      __ LoadImmediate(out, 1);
      __ b(&done);
      break;
    }
    case TypeCheckKind::kArrayCheck: {
      // Defer the involved array-type comparison to the slow path.
      __ cmp(out, ShifterOperand(cls));
      DCHECK(locations->OnlyCallsOnSlowPath());
      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
          instruction, /* is_fatal */ false);
      codegen_->AddSlowPath(slow_path);
      __ b(slow_path->GetEntryLabel(), NE);
      __ LoadImmediate(out, 1);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
    case TypeCheckKind::kUnresolvedCheck:
    case TypeCheckKind::kInterfaceCheck:
    default: {
      // Interface/unresolved checks go through the runtime; the result lands
      // in `out` (R0) per the calling convention set up by the builder.
      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                              instruction,
                              instruction->GetDexPc(),
                              nullptr);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
  }

  if (zero.IsLinked()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (done.IsLinked()) {
    __ Bind(&done);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}
4912
4913void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4914  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
4915  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
4916
4917  switch (instruction->GetTypeCheckKind()) {
4918    case TypeCheckKind::kExactCheck:
4919    case TypeCheckKind::kAbstractClassCheck:
4920    case TypeCheckKind::kClassHierarchyCheck:
4921    case TypeCheckKind::kArrayObjectCheck:
4922      call_kind = throws_into_catch
4923          ? LocationSummary::kCallOnSlowPath
4924          : LocationSummary::kNoCall;
4925      break;
4926    case TypeCheckKind::kUnresolvedCheck:
4927    case TypeCheckKind::kInterfaceCheck:
4928      call_kind = LocationSummary::kCall;
4929      break;
4930    case TypeCheckKind::kArrayCheck:
4931      call_kind = LocationSummary::kCallOnSlowPath;
4932      break;
4933  }
4934
4935  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4936      instruction, call_kind);
4937  if (call_kind != LocationSummary::kCall) {
4938    locations->SetInAt(0, Location::RequiresRegister());
4939    locations->SetInAt(1, Location::RequiresRegister());
4940    // Note that TypeCheckSlowPathARM uses this register too.
4941    locations->AddTemp(Location::RequiresRegister());
4942  } else {
4943    InvokeRuntimeCallingConvention calling_convention;
4944    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4945    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
4946  }
4947}
4948
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  // Runtime-call variants have no temp; inline variants loaded the object's
  // class into the temp register.
  Register temp = locations->WillCall()
      ? Register(kNoRegister)
      : locations->GetTemp(0).AsRegister<Register>();

  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
  SlowPathCode* slow_path = nullptr;

  if (!locations->WillCall()) {
    // The slow path throws ClassCastException; it is fatal (does not return)
    // unless the check can branch into a catch handler.
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, !locations->CanCall());
    codegen_->AddSlowPath(slow_path);
  }

  Label done;
  // A null object trivially passes any checkcast.
  // Avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &done);
  }

  if (locations->WillCall()) {
    // For the runtime-call variants, clobber `obj` with its class (the
    // register is caller-save, see VisitInstanceOf for the same trick).
    __ LoadFromOffset(kLoadWord, obj, obj, class_offset);
    __ MaybeUnpoisonHeapReference(obj);
  } else {
    __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
    __ MaybeUnpoisonHeapReference(temp);
  }

  switch (instruction->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kArrayCheck: {
      __ cmp(temp, ShifterOperand(cls));
      // Jump to slow path for throwing the exception or doing a
      // more involved array check.
      __ b(slow_path->GetEntryLabel(), NE);
      break;
    }
    case TypeCheckKind::kAbstractClassCheck: {
      // If the class is abstract, we eagerly fetch the super class of the
      // object to avoid doing a comparison we know will fail.
      Label loop;
      __ Bind(&loop);
      __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
      __ MaybeUnpoisonHeapReference(temp);
      // Jump to the slow path to throw the exception.
      __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
      __ cmp(temp, ShifterOperand(cls));
      __ b(&loop, NE);
      break;
    }
    case TypeCheckKind::kClassHierarchyCheck: {
      // Walk over the class hierarchy to find a match.
      Label loop;
      __ Bind(&loop);
      __ cmp(temp, ShifterOperand(cls));
      __ b(&done, EQ);
      __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
      __ MaybeUnpoisonHeapReference(temp);
      __ CompareAndBranchIfNonZero(temp, &loop);
      // Jump to the slow path to throw the exception.
      __ b(slow_path->GetEntryLabel());
      break;
    }
    case TypeCheckKind::kArrayObjectCheck: {
      // Do an exact check.
      __ cmp(temp, ShifterOperand(cls));
      __ b(&done, EQ);
      // Otherwise, we need to check that the object's class is a non primitive array.
      __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
      __ MaybeUnpoisonHeapReference(temp);
      // A null component type means not an array: throw.
      __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
      __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
      // A non-zero primitive type means a primitive array: throw.
      __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
      break;
    }
    case TypeCheckKind::kUnresolvedCheck:
    case TypeCheckKind::kInterfaceCheck:
    default:
      // Interface/unresolved checks are delegated to the runtime, which
      // throws on failure.
      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                              instruction,
                              instruction->GetDexPc(),
                              nullptr);
      break;
  }
  __ Bind(&done);

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}
5046
5047void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
5048  LocationSummary* locations =
5049      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
5050  InvokeRuntimeCallingConvention calling_convention;
5051  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
5052}
5053
void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
  // Dispatch to the lock or unlock entrypoint depending on whether this is a
  // monitor-enter or a monitor-exit.
  codegen_->InvokeRuntime(instruction->IsEnter()
        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
      instruction,
      instruction->GetDexPc(),
      nullptr);
}
5061
5062void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction, AND); }
5063void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction, ORR); }
5064void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction, EOR); }
5065
// Shared location logic for And/Or/Xor, both int and long.
void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
         || instruction->GetResultType() == Primitive::kPrimLong);
  // Note: GVN reorders commutative operations to have the constant on the right hand side.
  // The second operand stays a constant only if it is encodable as an
  // immediate for `opcode` (the Generate*Const helpers also accept the
  // complement, e.g. BIC for AND).
  locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode));
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
5076
void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
  // Defers to the shared And/Or/Xor code generator.
  HandleBitwiseOperation(instruction);
}
5080
void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
  // Defers to the shared And/Or/Xor code generator.
  HandleBitwiseOperation(instruction);
}
5084
void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
  // Defers to the shared And/Or/Xor code generator.
  HandleBitwiseOperation(instruction);
}
5088
// Emits `out = first & value` for one 32-bit half.
void InstructionCodeGeneratorARM::GenerateAndConst(Register out, Register first, uint32_t value) {
  // Optimize special cases for individual halfs of `and-long` (`and` is simplified earlier).
  if (value == 0xffffffffu) {
    // AND with all-ones is the identity; just copy if needed.
    if (out != first) {
      __ mov(out, ShifterOperand(first));
    }
    return;
  }
  if (value == 0u) {
    // AND with zero yields zero.
    __ mov(out, ShifterOperand(0));
    return;
  }
  ShifterOperand so;
  if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, AND, value, &so)) {
    __ and_(out, first, so);
  } else {
    // If `value` is not encodable, `~value` must be — the locations builder
    // (ArmEncodableConstantOrRegister) is expected to only keep constants
    // with one of the two encodings; fall back to BIC with the complement.
    DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, BIC, ~value, &so));
    __ bic(out, first, ShifterOperand(~value));
  }
}
5109
// Emits `out = first | value` for one 32-bit half.
void InstructionCodeGeneratorARM::GenerateOrrConst(Register out, Register first, uint32_t value) {
  // Optimize special cases for individual halfs of `or-long` (`or` is simplified earlier).
  if (value == 0u) {
    // OR with zero is the identity; just copy if needed.
    if (out != first) {
      __ mov(out, ShifterOperand(first));
    }
    return;
  }
  if (value == 0xffffffffu) {
    // OR with all-ones yields all-ones: mvn of 0.
    __ mvn(out, ShifterOperand(0));
    return;
  }
  ShifterOperand so;
  if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORR, value, &so)) {
    __ orr(out, first, so);
  } else {
    // If `value` is not encodable, `~value` must be (see GenerateAndConst);
    // fall back to ORN with the complement.
    DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORN, ~value, &so));
    __ orn(out, first, ShifterOperand(~value));
  }
}
5130
// Emits `out = first ^ value` for one 32-bit half.
void InstructionCodeGeneratorARM::GenerateEorConst(Register out, Register first, uint32_t value) {
  // Optimize special case for individual halfs of `xor-long` (`xor` is simplified earlier).
  if (value == 0u) {
    // XOR with zero is the identity; just copy if needed.
    if (out != first) {
      __ mov(out, ShifterOperand(first));
    }
    return;
  }
  // No complementary encoding is attempted for EOR, so `value` is expected to
  // be directly encodable as an immediate here.
  __ eor(out, first, ShifterOperand(value));
}
5141
// Shared code generation for And/Or/Xor: int and long, register or
// immediate second operand. Long operations are emitted as two independent
// 32-bit operations on the low and high halves.
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  Location out = locations->Out();

  if (second.IsConstant()) {
    // Constant operand: use the Generate*Const helpers, which pick the best
    // encoding (identity move, zero, or complemented opcode) per half.
    uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
    uint32_t value_low = Low32Bits(value);
    if (instruction->GetResultType() == Primitive::kPrimInt) {
      Register first_reg = first.AsRegister<Register>();
      Register out_reg = out.AsRegister<Register>();
      if (instruction->IsAnd()) {
        GenerateAndConst(out_reg, first_reg, value_low);
      } else if (instruction->IsOr()) {
        GenerateOrrConst(out_reg, first_reg, value_low);
      } else {
        DCHECK(instruction->IsXor());
        GenerateEorConst(out_reg, first_reg, value_low);
      }
    } else {
      DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
      uint32_t value_high = High32Bits(value);
      Register first_low = first.AsRegisterPairLow<Register>();
      Register first_high = first.AsRegisterPairHigh<Register>();
      Register out_low = out.AsRegisterPairLow<Register>();
      Register out_high = out.AsRegisterPairHigh<Register>();
      if (instruction->IsAnd()) {
        GenerateAndConst(out_low, first_low, value_low);
        GenerateAndConst(out_high, first_high, value_high);
      } else if (instruction->IsOr()) {
        GenerateOrrConst(out_low, first_low, value_low);
        GenerateOrrConst(out_high, first_high, value_high);
      } else {
        DCHECK(instruction->IsXor());
        GenerateEorConst(out_low, first_low, value_low);
        GenerateEorConst(out_high, first_high, value_high);
      }
    }
    return;
  }

  // Register operand: emit the operation directly.
  if (instruction->GetResultType() == Primitive::kPrimInt) {
    Register first_reg = first.AsRegister<Register>();
    ShifterOperand second_reg(second.AsRegister<Register>());
    Register out_reg = out.AsRegister<Register>();
    if (instruction->IsAnd()) {
      __ and_(out_reg, first_reg, second_reg);
    } else if (instruction->IsOr()) {
      __ orr(out_reg, first_reg, second_reg);
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out_reg, first_reg, second_reg);
    }
  } else {
    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
    Register first_low = first.AsRegisterPairLow<Register>();
    Register first_high = first.AsRegisterPairHigh<Register>();
    ShifterOperand second_low(second.AsRegisterPairLow<Register>());
    ShifterOperand second_high(second.AsRegisterPairHigh<Register>());
    Register out_low = out.AsRegisterPairLow<Register>();
    Register out_high = out.AsRegisterPairHigh<Register>();
    if (instruction->IsAnd()) {
      __ and_(out_low, first_low, second_low);
      __ and_(out_high, first_high, second_high);
    } else if (instruction->IsOr()) {
      __ orr(out_low, first_low, second_low);
      __ orr(out_high, first_high, second_high);
    } else {
      DCHECK(instruction->IsXor());
      __ eor(out_low, first_low, second_low);
      __ eor(out_high, first_high, second_high);
    }
  }
}
5217
// Downgrades a desired static/direct-call dispatch to one this backend can
// actually emit; returns the input unchanged when it is already supported.
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) {
  if (desired_dispatch_info.method_load_kind ==
      HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
    // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
    return HInvokeStaticOrDirect::DispatchInfo {
      HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
      HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
      0u,
      0u
    };
  }
  if (desired_dispatch_info.code_ptr_location ==
      HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
    const DexFile& outer_dex_file = GetGraph()->GetDexFile();
    if (&outer_dex_file != target_method.dex_file) {
      // Calls across dex files are more likely to exceed the available BL range,
      // so use absolute patch with fixup if available and kCallArtMethod otherwise.
      HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
          (desired_dispatch_info.method_load_kind ==
           HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
          ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
          : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
      // Keep the method-load kind and data; only the code-pointer location
      // (and direct code pointer, reset to 0) change.
      return HInvokeStaticOrDirect::DispatchInfo {
        desired_dispatch_info.method_load_kind,
        code_ptr_location,
        desired_dispatch_info.method_load_data,
        0u
      };
    }
  }
  return desired_dispatch_info;
}
5252
// Emits a static/direct call in three steps: (1) optionally preload the
// direct code pointer into LR, (2) materialize the callee ArtMethod* (or
// entrypoint) according to the method load kind, (3) emit the actual call.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // For better instruction scheduling we load the direct code pointer before the method pointer.
  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
      // LR = code address from literal pool with link-time patch.
      __ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // LR = invoke->GetDirectCodePtr();
      __ LoadImmediate(LR, invoke->GetDirectCodePtr());
      break;
    default:
      // Other code pointer locations need no LR preload.
      break;
  }

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      // Self-call: the current method is already available as an invoke input.
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      // The method address is known at compile time; load it as an immediate.
      __ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
      // Method address resolved at link time via a patched literal.
      __ LoadLiteral(temp.AsRegister<Register>(),
                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement this type.
      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
      LOG(FATAL) << "Unsupported";
      UNREACHABLE();
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      // Resolve the callee through the current method's dex cache.
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      Register method_reg;
      Register reg = temp.AsRegister<Register>();
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<Register>();
      } else {
        // Intrinsified invokes don't carry the current method as an input;
        // reload it from its stack slot instead.
        DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
      }
      // temp = current_method->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset(
              kArmPointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      // Recursive call: branch straight to this method's frame entry.
      __ bl(GetFrameEntryLabel());
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // Record the BL so the linker can rewrite its target.
      relative_call_patches_.emplace_back(invoke->GetTargetMethod());
      __ BindTrackedLabel(&relative_call_patches_.back().label);
      // Arbitrarily branch to the BL itself, override at link time.
      __ bl(&relative_call_patches_.back().label);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // LR prepared above for better instruction scheduling.
      // LR()
      __ blx(LR);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // LR = callee_method->entry_point_from_quick_compiled_code_
      __ LoadFromOffset(
          kLoadWord, LR, callee_method.AsRegister<Register>(),
          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
      break;
  }

  DCHECK(!IsLeafMethod());
}
5340
5341void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
5342  Register temp = temp_location.AsRegister<Register>();
5343  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
5344      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
5345  LocationSummary* locations = invoke->GetLocations();
5346  Location receiver = locations->InAt(0);
5347  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
5348  // temp = object->GetClass();
5349  DCHECK(receiver.IsRegister());
5350  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
5351  MaybeRecordImplicitNullCheck(invoke);
5352  __ MaybeUnpoisonHeapReference(temp);
5353  // temp = temp->GetMethodAt(method_offset);
5354  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
5355      kArmWordSize).Int32Value();
5356  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
5357  // LR = temp->GetEntryPoint();
5358  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
5359  // LR();
5360  __ blx(LR);
5361}
5362
5363void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
5364  DCHECK(linker_patches->empty());
5365  size_t size = method_patches_.size() + call_patches_.size() + relative_call_patches_.size();
5366  linker_patches->reserve(size);
5367  for (const auto& entry : method_patches_) {
5368    const MethodReference& target_method = entry.first;
5369    Literal* literal = entry.second;
5370    DCHECK(literal->GetLabel()->IsBound());
5371    uint32_t literal_offset = literal->GetLabel()->Position();
5372    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
5373                                                       target_method.dex_file,
5374                                                       target_method.dex_method_index));
5375  }
5376  for (const auto& entry : call_patches_) {
5377    const MethodReference& target_method = entry.first;
5378    Literal* literal = entry.second;
5379    DCHECK(literal->GetLabel()->IsBound());
5380    uint32_t literal_offset = literal->GetLabel()->Position();
5381    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
5382                                                     target_method.dex_file,
5383                                                     target_method.dex_method_index));
5384  }
5385  for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
5386    uint32_t literal_offset = info.label.Position();
5387    linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
5388                                                             info.target_method.dex_file,
5389                                                             info.target_method.dex_method_index));
5390  }
5391}
5392
5393Literal* CodeGeneratorARM::DeduplicateMethodLiteral(MethodReference target_method,
5394                                                    MethodToLiteralMap* map) {
5395  // Look up the literal for target_method.
5396  auto lb = map->lower_bound(target_method);
5397  if (lb != map->end() && !map->key_comp()(target_method, lb->first)) {
5398    return lb->second;
5399  }
5400  // We don't have a literal for this method yet, insert a new one.
5401  Literal* literal = __ NewLiteral<uint32_t>(0u);
5402  map->PutBefore(lb, target_method, literal);
5403  return literal;
5404}
5405
// Literal patched at link time with the ArtMethod* of target_method.
Literal* CodeGeneratorARM::DeduplicateMethodAddressLiteral(MethodReference target_method) {
  return DeduplicateMethodLiteral(target_method, &method_patches_);
}
5409
// Literal patched at link time with the code address of target_method.
Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_method) {
  return DeduplicateMethodLiteral(target_method, &call_patches_);
}
5413
void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}
5418
void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}
5423
5424void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
5425  DCHECK(codegen_->IsBaseline());
5426  LocationSummary* locations =
5427      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
5428  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
5429}
5430
// Baseline-only: no code is emitted for a fake String.
void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}
5435
5436// Simple implementation of packed switch - generate cascaded compare/jumps.
5437void LocationsBuilderARM::VisitPackedSwitch(HPackedSwitch* switch_instr) {
5438  LocationSummary* locations =
5439      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
5440  locations->SetInAt(0, Location::RequiresRegister());
5441  if (switch_instr->GetNumEntries() >= kPackedSwitchJumpTableThreshold &&
5442      codegen_->GetAssembler()->IsThumb()) {
5443    locations->AddTemp(Location::RequiresRegister());  // We need a temp for the table base.
5444    if (switch_instr->GetStartValue() != 0) {
5445      locations->AddTemp(Location::RequiresRegister());  // We need a temp for the bias.
5446    }
5447  }
5448}
5449
// Emits a packed switch either as cascaded compare/branches or, for large
// Thumb2 switches, as a PC-relative jump table lookup.
void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  uint32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  Register value_reg = locations->InAt(0).AsRegister<Register>();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // The jump table path is only taken for large switches in Thumb mode,
  // matching the temps reserved in the LocationsBuilder visitor.
  if (num_entries < kPackedSwitchJumpTableThreshold || !codegen_->GetAssembler()->IsThumb()) {
    // Create a series of compare/jumps.
    const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
    for (uint32_t i = 0; i < num_entries; i++) {
      GenerateCompareWithImmediate(value_reg, lower_bound + i);
      __ b(codegen_->GetLabelOf(successors[i]), EQ);
    }

    // And the default for any other value.
    if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
      __ b(codegen_->GetLabelOf(default_block));
    }
  } else {
    // Create a table lookup.
    Register temp_reg = locations->GetTemp(0).AsRegister<Register>();

    // Materialize a pointer to the switch table
    std::vector<Label*> labels(num_entries);
    const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
    for (uint32_t i = 0; i < num_entries; i++) {
      labels[i] = codegen_->GetLabelOf(successors[i]);
    }
    JumpTable* table = __ CreateJumpTable(std::move(labels), temp_reg);

    // Remove the bias.
    Register key_reg;
    if (lower_bound != 0) {
      // A second temp was reserved for the unbiased key; value_reg must not
      // be clobbered since it may be live elsewhere.
      key_reg = locations->GetTemp(1).AsRegister<Register>();
      __ AddConstant(key_reg, value_reg, -lower_bound);
    } else {
      key_reg = value_reg;
    }

    // Check whether the value is in the table, jump to default block if not.
    __ CmpConstant(key_reg, num_entries - 1);
    __ b(codegen_->GetLabelOf(default_block), Condition::HI);

    // Load the displacement from the table.
    __ ldr(temp_reg, Address(temp_reg, key_reg, Shift::LSL, 2));

    // Dispatch is a direct add to the PC (for Thumb2).
    __ EmitJumpTableDispatch(table, temp_reg);
  }
}
5501
5502void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) {
5503  if (!trg.IsValid()) {
5504    DCHECK(type == Primitive::kPrimVoid);
5505    return;
5506  }
5507
5508  DCHECK_NE(type, Primitive::kPrimVoid);
5509
5510  Location return_loc = InvokeDexCallingConventionVisitorARM().GetReturnLocation(type);
5511  if (return_loc.Equals(trg)) {
5512    return;
5513  }
5514
5515  // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
5516  //       with the last branch.
5517  if (type == Primitive::kPrimLong) {
5518    HParallelMove parallel_move(GetGraph()->GetArena());
5519    parallel_move.AddMove(return_loc.ToLow(), trg.ToLow(), Primitive::kPrimInt, nullptr);
5520    parallel_move.AddMove(return_loc.ToHigh(), trg.ToHigh(), Primitive::kPrimInt, nullptr);
5521    GetMoveResolver()->EmitNativeCode(&parallel_move);
5522  } else if (type == Primitive::kPrimDouble) {
5523    HParallelMove parallel_move(GetGraph()->GetArena());
5524    parallel_move.AddMove(return_loc.ToLow(), trg.ToLow(), Primitive::kPrimFloat, nullptr);
5525    parallel_move.AddMove(return_loc.ToHigh(), trg.ToHigh(), Primitive::kPrimFloat, nullptr);
5526    GetMoveResolver()->EmitNativeCode(&parallel_move);
5527  } else {
5528    // Let the parallel move resolver take care of all of this.
5529    HParallelMove parallel_move(GetGraph()->GetArena());
5530    parallel_move.AddMove(return_loc, trg, type, nullptr);
5531    GetMoveResolver()->EmitNativeCode(&parallel_move);
5532  }
5533}
5534
5535#undef __
5536#undef QUICK_ENTRY_POINT
5537
5538}  // namespace arm
5539}  // namespace art
5540