code_generator_arm.cc revision 5233f93ee336b3581ccdb993ff6342c52fec34b0
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_arm.h"
18
19#include "arch/arm/instruction_set_features_arm.h"
20#include "art_method.h"
21#include "code_generator_utils.h"
22#include "compiled_method.h"
23#include "entrypoints/quick/quick_entrypoints.h"
24#include "gc/accounting/card_table.h"
25#include "intrinsics.h"
26#include "intrinsics_arm.h"
27#include "mirror/array-inl.h"
28#include "mirror/class-inl.h"
29#include "thread.h"
30#include "utils/arm/assembler_arm.h"
31#include "utils/arm/managed_register_arm.h"
32#include "utils/assembler.h"
33#include "utils/stack_checks.h"
34
35namespace art {
36
37namespace arm {
38
39static bool ExpectedPairLayout(Location location) {
40  // We expected this for both core and fpu register pairs.
41  return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
42}
43
// Stack offset at which the current ArtMethod* is spilled (always SP + 0).
static constexpr int kCurrentMethodStackOffset = 0;
// Register holding the ArtMethod* on entry to a managed method.
static constexpr Register kMethodRegisterArgument = R0;

// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
static constexpr Register kCoreSavedRegisterForBaseline = R5;
static constexpr Register kCoreCalleeSaves[] =
    { R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
    { S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 };

// D31 cannot be split into two S registers, and the register allocator only works on
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;

// In the slow path classes below, `__` emits through the `codegen` argument
// passed to EmitNativeCode (redefined after the slow paths).
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
// Int32 offset of a quick runtime entrypoint within the Thread object.
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
61
// Slow path that calls the pThrowNullPointer runtime entrypoint. Fatal: no
// exit branch is emitted, the runtime call does not return here.
class NullCheckSlowPathARM : public SlowPathCode {
 public:
  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }

 private:
  HNullCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
85
// Slow path that calls the pThrowDivZero runtime entrypoint. Fatal: no exit
// branch is emitted, the runtime call does not return here.
class DivZeroCheckSlowPathARM : public SlowPathCode {
 public:
  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
109
// Slow path calling the pTestSuspend runtime entrypoint. Live registers are
// saved and restored around the call because execution resumes afterwards:
// either at `return_label_` (back in the main code) or at `successor_`.
class SuspendCheckSlowPathARM : public SlowPathCode {
 public:
  SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      // Resume in the main code, right after the suspend check.
      __ b(GetReturnLabel());
    } else {
      // Branch directly to the block that follows the suspend check.
      __ b(arm_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
150
// Slow path that moves the index and length into the runtime calling
// convention registers and calls pThrowArrayBounds. Fatal: the runtime call
// does not return here.
class BoundsCheckSlowPathARM : public SlowPathCode {
 public:
  explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();

    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimInt,
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt);
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
188
// Slow path that resolves (and optionally initializes) a class through the
// runtime: pInitializeStaticStorage when `do_clinit_` is set, pInitializeType
// otherwise. The result is moved from R0 to the instruction's output.
class LoadClassSlowPathARM : public SlowPathCode {
 public:
  LoadClassSlowPathARM(HLoadClass* cls,
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the type index as the sole argument to the entrypoint.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_
        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
        : QUICK_ENTRY_POINT(pInitializeType);
    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    }
    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
241
// Slow path that resolves a string through the pResolveString runtime
// entrypoint, then moves the result from R0 to the instruction's output.
class LoadStringSlowPathARM : public SlowPathCode {
 public:
  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // Pass the string index as the sole argument to the entrypoint.
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    arm_codegen->InvokeRuntime(
        QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
    arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));

    RestoreLiveRegisters(codegen, locations);
    __ b(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
271
// Slow path shared by HInstanceOf and HCheckCast. Calls
// pInstanceofNonTrivial or pCheckCast respectively. When `is_fatal_` is set,
// live registers are not saved/restored and no exit branch is emitted.
class TypeCheckSlowPathARM : public SlowPathCode {
 public:
  TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
      : instruction_(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    // For checkcast the object's class lives in a temp; for instanceof it is
    // the output register.
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));

    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    __ Bind(GetEntryLabel());

    if (instruction_->IsCheckCast()) {
      // The codegen for the instruction overwrites `temp`, so put it back in place.
      Register obj = locations->InAt(0).AsRegister<Register>();
      Register temp = locations->GetTemp(0).AsRegister<Register>();
      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
      __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
      __ MaybeUnpoisonHeapReference(temp);
    }

    if (!is_fatal_) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        object_class,
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
      // The entrypoint's boolean result comes back in R0.
      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
    } else {
      DCHECK(instruction_->IsCheckCast());
      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                                 instruction_,
                                 instruction_->GetDexPc(),
                                 this);
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ b(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM"; }

  bool IsFatal() const OVERRIDE { return is_fatal_; }

 private:
  HInstruction* const instruction_;
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
341
// Slow path calling the pDeoptimize runtime entrypoint. No exit branch is
// emitted: the runtime call does not return to this compiled code.
class DeoptimizationSlowPathARM : public SlowPathCode {
 public:
  explicit DeoptimizationSlowPathARM(HInstruction* instruction)
    : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
363
#undef __
// From here on, `__` emits through this code generator's own assembler
// (member functions of CodeGeneratorARM and its visitors).
#define __ down_cast<ArmAssembler*>(GetAssembler())->
366
367inline Condition ARMSignedOrFPCondition(IfCondition cond) {
368  switch (cond) {
369    case kCondEQ: return EQ;
370    case kCondNE: return NE;
371    case kCondLT: return LT;
372    case kCondLE: return LE;
373    case kCondGT: return GT;
374    case kCondGE: return GE;
375  }
376  LOG(FATAL) << "Unreachable";
377  UNREACHABLE();
378}
379
380inline Condition ARMUnsignedCondition(IfCondition cond) {
381  switch (cond) {
382    case kCondEQ: return EQ;
383    case kCondNE: return NE;
384    case kCondLT: return LO;
385    case kCondLE: return LS;
386    case kCondGT: return HI;
387    case kCondGE: return HS;
388  }
389  LOG(FATAL) << "Unreachable";
390  UNREACHABLE();
391}
392
393void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
394  stream << Register(reg);
395}
396
397void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
398  stream << SRegister(reg);
399}
400
// Spills core register `reg_id` to the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed in the spill area.
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
405
// Reloads core register `reg_id` from the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed in the spill area.
size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
  return kArmWordSize;
}
410
// Spills S register `reg_id` to the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed in the spill area.
size_t CodeGeneratorARM::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreSToOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
415
// Reloads S register `reg_id` from the stack slot at SP + `stack_index`.
// Returns the number of bytes consumed in the spill area.
size_t CodeGeneratorARM::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadSFromOffset(static_cast<SRegister>(reg_id), SP, stack_index);
  return kArmWordSize;
}
420
// Constructs the ARM code generator. Register counts and callee-save masks
// are forwarded to the CodeGenerator base; the patch containers are backed by
// the graph's arena.
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
                                   const ArmInstructionSetFeatures& isa_features,
                                   const CompilerOptions& compiler_options,
                                   OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfSRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(),
      isa_features_(isa_features),
      method_patches_(MethodReferenceComparator(),
                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      call_patches_(MethodReferenceComparator(),
                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
  // Always save the LR register to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(LR));
}
449
// Finalizes code generation. FinalizeCode() may change instruction positions
// (branch fixups, literal pool emission), so every pc previously recorded —
// stack maps, block labels, disassembly intervals, relative call patches —
// is adjusted to its new position afterwards.
void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches and literal loads and emit the literal pool.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }
  // Adjust native pc offsets of block labels.
  for (HBasicBlock* block : *block_order_) {
    // Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
    // FirstNonEmptyBlock() which could lead to adjusting a label more than once.
    DCHECK_LT(block->GetBlockId(), GetGraph()->GetBlocks().size());
    Label* block_label = &block_labels_[block->GetBlockId()];
    DCHECK_EQ(block_label->IsBound(), !block->IsSingleJump());
    if (block_label->IsBound()) {
      __ AdjustLabelPosition(block_label);
    }
  }
  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }
  // Adjust pc offsets for relative call patches.
  for (MethodPatchInfo<Label>& info : relative_call_patches_) {
    __ AdjustLabelPosition(&info.label);
  }

  CodeGenerator::Finalize(allocator);
}
492
// Allocates a free register (or register pair) suitable for `type`, updating
// the blocked-register tables so core registers and register pairs stay
// mutually consistent.
Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
  switch (type) {
    case Primitive::kPrimLong: {
      // Longs take a core register pair; block both halves.
      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
      ArmManagedRegister pair =
          ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);

      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
      UpdateBlockedPairRegisters();
      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
    }

    case Primitive::kPrimByte:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
      // Block all register pairs that contain `reg`.
      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
        ArmManagedRegister current =
            ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
          blocked_register_pairs_[i] = true;
        }
      }
      return Location::RegisterLocation(reg);
    }

    case Primitive::kPrimFloat: {
      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
      return Location::FpuRegisterLocation(reg);
    }

    case Primitive::kPrimDouble: {
      // Doubles need two consecutive S registers starting at an even index.
      int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
      DCHECK_EQ(reg % 2, 0);
      return Location::FpuRegisterPairLocation(reg, reg + 1);
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
  }

  return Location();
}
543
544void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
545  // Don't allocate the dalvik style register pair passing.
546  blocked_register_pairs_[R1_R2] = true;
547
548  // Stack register, LR and PC are always reserved.
549  blocked_core_registers_[SP] = true;
550  blocked_core_registers_[LR] = true;
551  blocked_core_registers_[PC] = true;
552
553  // Reserve thread register.
554  blocked_core_registers_[TR] = true;
555
556  // Reserve temp register.
557  blocked_core_registers_[IP] = true;
558
559  if (is_baseline) {
560    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
561      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
562    }
563
564    blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
565
566    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
567      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
568    }
569  }
570
571  UpdateBlockedPairRegisters();
572}
573
574void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
575  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
576    ArmManagedRegister current =
577        ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
578    if (blocked_core_registers_[current.AsRegisterPairLow()]
579        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
580      blocked_register_pairs_[i] = true;
581    }
582  }
583}
584
// Constructs the instruction visitor, sharing the code generator's assembler.
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}
589
// Computes the core and FP register masks spilled in the frame entry and
// reloaded in the frame exit.
void CodeGeneratorARM::ComputeSpillMask() {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  // Save one extra register for baseline. Note that on thumb2, there is no easy
  // instruction to restore just the PC, so this actually helps both baseline
  // and non-baseline to save and restore at least two registers at entry and exit.
  core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  // We use vpush and vpop for saving and restoring floating point registers, which take
  // a SRegister and the number of registers to save/restore after that SRegister. We
  // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
  // but in the range.
  if (fpu_spill_mask_ != 0) {
    uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
    uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
    // Fill every bit between the lowest and highest spilled register so the
    // mask describes one contiguous vpush/vpop range.
    for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
      fpu_spill_mask_ |= (1 << i);
    }
  }
}
610
// Returns the DWARF register descriptor for a core register (CFI emission).
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
614
// Returns the DWARF register descriptor for an S register (CFI emission).
static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
618
// Emits the method prologue: optional stack overflow probe, core and FP
// callee-save spills (with matching CFI), frame allocation, and the spill of
// the current ArtMethod* at SP + 0.
void CodeGeneratorARM::GenerateFrameEntry() {
  bool skip_overflow_check =
      IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
  __ Bind(&frame_entry_label_);

  if (HasEmptyFrame()) {
    // Nothing is spilled and no frame is allocated.
    return;
  }

  if (!skip_overflow_check) {
    // Probe the word `GetStackOverflowReservedBytes` below SP; if the stack
    // has overflowed, this load faults. Record a pc so the fault can be
    // attributed to this method.
    __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
    __ LoadFromOffset(kLoadWord, IP, IP, 0);
    RecordPcInfo(nullptr, 0);
  }

  __ PushList(core_spill_mask_);
  __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
  __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, core_spill_mask_, kArmWordSize);
  if (fpu_spill_mask_ != 0) {
    // `fpu_spill_mask_` is contiguous (see ComputeSpillMask), so a single
    // vpush starting at its lowest register covers all spilled S registers.
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
  }
  // Allocate the rest of the frame beyond the register spills.
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, -adjust);
  __ cfi().AdjustCFAOffset(adjust);
  __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
649
// Emits the method epilogue, mirroring GenerateFrameEntry: frame deallocation,
// FP register reloads, then a pop that loads the saved LR into PC to return.
void CodeGeneratorARM::GenerateFrameExit() {
  if (HasEmptyFrame()) {
    __ bx(LR);
    return;
  }
  __ cfi().RememberState();
  int adjust = GetFrameSize() - FrameEntrySpillSize();
  __ AddConstant(SP, adjust);
  __ cfi().AdjustCFAOffset(-adjust);
  if (fpu_spill_mask_ != 0) {
    SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
    __ vpops(start_register, POPCOUNT(fpu_spill_mask_));
    __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
    __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
  }
  // Pop LR into PC to return.
  DCHECK_NE(core_spill_mask_ & (1 << LR), 0U);
  uint32_t pop_mask = (core_spill_mask_ & (~(1 << LR))) | 1 << PC;
  __ PopList(pop_mask);
  // The epilogue does not fall through; reset CFI to the state valid for the
  // instructions that follow it in the method body.
  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}
672
// Binds `block`'s label to the current assembler position.
void CodeGeneratorARM::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}
676
677Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
678  switch (load->GetType()) {
679    case Primitive::kPrimLong:
680    case Primitive::kPrimDouble:
681      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
682
683    case Primitive::kPrimInt:
684    case Primitive::kPrimNot:
685    case Primitive::kPrimFloat:
686      return Location::StackSlot(GetStackSlot(load->GetLocal()));
687
688    case Primitive::kPrimBoolean:
689    case Primitive::kPrimByte:
690    case Primitive::kPrimChar:
691    case Primitive::kPrimShort:
692    case Primitive::kPrimVoid:
693      LOG(FATAL) << "Unexpected type " << load->GetType();
694      UNREACHABLE();
695  }
696
697  LOG(FATAL) << "Unreachable";
698  UNREACHABLE();
699}
700
// Returns the location of the next method argument of `type` per the ARM
// managed calling convention, advancing the visitor's register and stack
// cursors. Every argument also consumes stack index slots, so register and
// stack assignment stay in sync.
Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // 32-bit core argument: next free core register, else a stack slot.
      uint32_t index = gp_index_++;
      uint32_t stack_index = stack_index_++;
      if (index < calling_convention.GetNumberOfRegisters()) {
        return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimLong: {
      // Longs take a core register pair; both gp and stack cursors advance
      // by two.
      uint32_t index = gp_index_;
      uint32_t stack_index = stack_index_;
      gp_index_ += 2;
      stack_index_ += 2;
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        if (calling_convention.GetRegisterAt(index) == R1) {
          // Skip R1, and use R2_R3 instead.
          gp_index_++;
          index++;
        }
      }
      if (index + 1 < calling_convention.GetNumberOfRegisters()) {
        // The convention must provide the pair as consecutive registers.
        DCHECK_EQ(calling_convention.GetRegisterAt(index) + 1,
                  calling_convention.GetRegisterAt(index + 1));

        return Location::RegisterPairLocation(calling_convention.GetRegisterAt(index),
                                              calling_convention.GetRegisterAt(index + 1));
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimFloat: {
      uint32_t stack_index = stack_index_++;
      if (float_index_ % 2 == 0) {
        // When the float cursor is aligned, jump past any S registers
        // already consumed by doubles.
        float_index_ = std::max(double_index_, float_index_);
      }
      if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
        return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
      } else {
        return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimDouble: {
      // Doubles take an even-aligned S register pair.
      double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
      uint32_t stack_index = stack_index_;
      stack_index_ += 2;
      if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
        uint32_t index = double_index_;
        double_index_ += 2;
        Location result = Location::FpuRegisterPairLocation(
          calling_convention.GetFpuRegisterAt(index),
          calling_convention.GetFpuRegisterAt(index + 1));
        DCHECK(ExpectedPairLayout(result));
        return result;
      } else {
        return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
      }
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }
  return Location();
}
776
777Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) const {
778  switch (type) {
779    case Primitive::kPrimBoolean:
780    case Primitive::kPrimByte:
781    case Primitive::kPrimChar:
782    case Primitive::kPrimShort:
783    case Primitive::kPrimInt:
784    case Primitive::kPrimNot: {
785      return Location::RegisterLocation(R0);
786    }
787
788    case Primitive::kPrimFloat: {
789      return Location::FpuRegisterLocation(S0);
790    }
791
792    case Primitive::kPrimLong: {
793      return Location::RegisterPairLocation(R0, R1);
794    }
795
796    case Primitive::kPrimDouble: {
797      return Location::FpuRegisterPairLocation(S0, S1);
798    }
799
800    case Primitive::kPrimVoid:
801      return Location();
802  }
803
804  UNREACHABLE();
805}
806
// Returns the register (R0) holding the ArtMethod* on entry.
Location InvokeDexCallingConventionVisitorARM::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}
810
811void CodeGeneratorARM::Move32(Location destination, Location source) {
812  if (source.Equals(destination)) {
813    return;
814  }
815  if (destination.IsRegister()) {
816    if (source.IsRegister()) {
817      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
818    } else if (source.IsFpuRegister()) {
819      __ vmovrs(destination.AsRegister<Register>(), source.AsFpuRegister<SRegister>());
820    } else {
821      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
822    }
823  } else if (destination.IsFpuRegister()) {
824    if (source.IsRegister()) {
825      __ vmovsr(destination.AsFpuRegister<SRegister>(), source.AsRegister<Register>());
826    } else if (source.IsFpuRegister()) {
827      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
828    } else {
829      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
830    }
831  } else {
832    DCHECK(destination.IsStackSlot()) << destination;
833    if (source.IsRegister()) {
834      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
835    } else if (source.IsFpuRegister()) {
836      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
837    } else {
838      DCHECK(source.IsStackSlot()) << source;
839      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
840      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
841    }
842  }
843}
844
// Moves a 64-bit value between core register pairs, FP register pairs and
// double stack slots. Combinations not needed by callers are UNIMPLEMENTED.
void CodeGeneratorARM::Move64(Location destination, Location source) {
  if (source.Equals(destination)) {
    return;
  }
  if (destination.IsRegisterPair()) {
    if (source.IsRegisterPair()) {
      // Use a parallel move so a potential overlap between the two pairs is
      // resolved correctly.
      EmitParallelMoves(
          Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
          Primitive::kPrimInt,
          Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
          Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
          Primitive::kPrimInt);
    } else if (source.IsFpuRegister()) {
      UNIMPLEMENTED(FATAL);
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // The pair layout (even low register, consecutive high register) allows
      // a single load-pair instruction.
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
                        SP, source.GetStackIndex());
    }
  } else if (destination.IsFpuRegisterPair()) {
    if (source.IsDoubleStackSlot()) {
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    } else {
      UNIMPLEMENTED(FATAL);
    }
  } else {
    DCHECK(destination.IsDoubleStackSlot());
    if (source.IsRegisterPair()) {
      // No conflict possible, so just do the moves.
      if (source.AsRegisterPairLow<Register>() == R1) {
        // R1/R2 does not satisfy ExpectedPairLayout (odd low register), so the
        // pair store cannot be used; store the two words individually.
        DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
        __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
        __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
      } else {
        __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
                         SP, destination.GetStackIndex());
      }
    } else if (source.IsFpuRegisterPair()) {
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    } else {
      DCHECK(source.IsDoubleStackSlot());
      // Stack-to-stack: move both 32-bit halves via a parallel move.
      EmitParallelMoves(
          Location::StackSlot(source.GetStackIndex()),
          Location::StackSlot(destination.GetStackIndex()),
          Primitive::kPrimInt,
          Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
          Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
          Primitive::kPrimInt);
    }
  }
}
902
// Moves the value produced by `instruction` into `location` on behalf of
// `move_for`: handles the current method, constant outputs, local loads,
// temporaries, and plain instruction outputs.
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->IsCurrentMethod()) {
    // The current method is stored at offset 0 in the frame.
    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    // The value already lives in the requested location.
    return;
  } else if (locations != nullptr && locations->Out().IsConstant()) {
    // Materialize the constant output directly into `location`.
    HConstant* const_to_move = locations->Out().GetConstant();
    if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
      int32_t value = GetInt32ValueOf(const_to_move);
      if (location.IsRegister()) {
        __ LoadImmediate(location.AsRegister<Register>(), value);
      } else {
        DCHECK(location.IsStackSlot());
        // Stack destinations go through the scratch register IP.
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
      }
    } else {
      DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
      int64_t value = const_to_move->AsLongConstant()->GetValue();
      if (location.IsRegisterPair()) {
        __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(location.IsDoubleStackSlot());
        // Store the two 32-bit halves through IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
      }
    }
  } else if (instruction->IsLoadLocal()) {
    // Locals live in stack slots; select a 32- or 64-bit move based on type.
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
      case Primitive::kPrimFloat:
        Move32(location, Location::StackSlot(stack_slot));
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, Location::DoubleStackSlot(stack_slot));
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    if (temp_location.IsStackSlot()) {
      Move32(location, temp_location);
    } else {
      DCHECK(temp_location.IsDoubleStackSlot());
      Move64(location, temp_location);
    }
  } else {
    // General case: move from the instruction's output location.
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    switch (instruction->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimFloat:
        Move32(location, locations->Out());
        break;

      case Primitive::kPrimLong:
      case Primitive::kPrimDouble:
        Move64(location, locations->Out());
        break;

      default:
        LOG(FATAL) << "Unexpected type " << instruction->GetType();
    }
  }
}
986
// Loads a 32-bit immediate into the core register designated by `location`.
void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadImmediate(location.AsRegister<Register>(), value);
}
991
// Convenience overload: resolves the thread-local offset of the quick
// entrypoint for this word size, then delegates to the offset-based overload.
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

// Loads the entrypoint address from the thread register (TR) at the given
// offset, calls it via LR, and records PC info for this runtime call.
void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                     HInstruction* instruction,
                                     uint32_t dex_pc,
                                     SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
  __ blx(LR);
  RecordPcInfo(instruction, dex_pc, slow_path);
}
1011
// Emits an unconditional jump to `successor`, inserting a suspend check when
// the jump is a loop back edge (or follows the entry block's suspend check).
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());

  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();

  HLoopInformation* info = block->GetLoopInformation();
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    // Back edge with a suspend check: the suspend check emits the branch.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }

  // In the entry block, emit the suspend check placed right before this goto.
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  // Elide the branch when the successor is the next block in emission order.
  if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
    __ b(codegen_->GetLabelOf(successor));
  }
}
1032
// Gotos and try boundaries use no registers.
void LocationsBuilderARM::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderARM::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}
1044
1045void InstructionCodeGeneratorARM::VisitTryBoundary(HTryBoundary* try_boundary) {
1046  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
1047  if (!successor->IsExitBlock()) {
1048    HandleGoto(try_boundary, successor);
1049  }
1050}
1051
// The exit block uses no registers and generates no code.
void LocationsBuilderARM::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
  UNUSED(exit);
}
1059
1060void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
1061  ShifterOperand operand;
1062  if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
1063    __ cmp(left, operand);
1064  } else {
1065    Register temp = IP;
1066    __ LoadImmediate(temp, right);
1067    __ cmp(left, ShifterOperand(temp));
1068  }
1069}
1070
// Emits the branches following an FP compare (vcmps/vcmpd issued by the
// caller), handling the unordered (NaN) outcome per the condition's policy.
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
                                                  Label* true_label,
                                                  Label* false_label) {
  __ vmstat();  // transfer FP status register to ARM APSR.
  // Dispatch the unordered case first: a NaN operand makes the condition
  // either definitely true or definitely false.
  if (cond->IsFPConditionTrueIfNaN()) {
    __ b(true_label, VS);  // VS for unordered.
  } else if (cond->IsFPConditionFalseIfNaN()) {
    __ b(false_label, VS);  // VS for unordered.
  }
  // Operands are ordered here; branch on the signed/FP condition code.
  __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
}
1082
// Compares two longs (register pairs, or a pair against a constant) and
// branches to true_label/false_label. The high words are compared first and
// decide the result unless equal, in which case the low words decide with an
// unsigned comparison.
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
                                                               Label* true_label,
                                                               Label* false_label) {
  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  IfCondition if_cond = cond->GetCondition();

  Register left_high = left.AsRegisterPairHigh<Register>();
  Register left_low = left.AsRegisterPairLow<Register>();
  IfCondition true_high_cond = if_cond;
  IfCondition false_high_cond = cond->GetOppositeCondition();
  Condition final_condition = ARMUnsignedCondition(if_cond);  // for the low-word compare.

  // Set the conditions for the test, remembering that == needs to be
  // decided using the low words.
  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      // Nothing to do.
      break;
    case kCondLT:
      // high <: strictly-greater high proves false; strictly-less proves true.
      false_high_cond = kCondGT;
      break;
    case kCondLE:
      true_high_cond = kCondLT;
      break;
    case kCondGT:
      false_high_cond = kCondLT;
      break;
    case kCondGE:
      true_high_cond = kCondGT;
      break;
  }
  if (right.IsConstant()) {
    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
    int32_t val_low = Low32Bits(value);
    int32_t val_high = High32Bits(value);

    // Compare high words; branch out unless they are equal.
    GenerateCompareWithImmediate(left_high, val_high);
    if (if_cond == kCondNE) {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    } else {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    }
    // Must be equal high, so compare the lows.
    GenerateCompareWithImmediate(left_low, val_low);
  } else {
    Register right_high = right.AsRegisterPairHigh<Register>();
    Register right_low = right.AsRegisterPairLow<Register>();

    // Compare high words; branch out unless they are equal.
    __ cmp(left_high, ShifterOperand(right_high));
    if (if_cond == kCondNE) {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
    } else if (if_cond == kCondEQ) {
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    } else {
      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
    }
    // Must be equal high, so compare the lows.
    __ cmp(left_low, ShifterOperand(right_low));
  }
  // The last comparison might be unsigned.
  __ b(true_label, final_condition);
}
1152
// Emits a long/FP comparison that was folded into an HIf (not materialized),
// branching to true_target/false_target accordingly.
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
                                                               HCondition* condition,
                                                               Label* true_target,
                                                               Label* false_target,
                                                               Label* always_true_target) {
  LocationSummary* locations = condition->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  // We don't want true_target as a nullptr.
  if (true_target == nullptr) {
    true_target = always_true_target;
  }
  bool falls_through = (false_target == nullptr);

  // FP compares don't like null false_targets.
  if (false_target == nullptr) {
    false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  }

  Primitive::Type type = condition->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(condition, true_target, false_target);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(condition, true_target, false_target);
      break;
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }

  // Only emit the final branch when the false block does not fall through.
  if (!falls_through) {
    __ b(false_target);
  }
}
1195
// Emits branching code for `instruction` (HIf or HDeoptimize): branches to
// true_target when the condition holds, then to false_target (if non-null).
// Constant conditions are resolved statically; materialized conditions are
// tested against zero; folded conditions re-emit the comparison.
void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                        Label* true_target,
                                                        Label* false_target,
                                                        Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  if (cond->IsIntConstant()) {
    // Constant condition, statically compared against 1.
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ b(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
      // Condition has been materialized, compare the output to 0
      DCHECK(instruction->GetLocations()->InAt(0).IsRegister());
      __ CompareAndBranchIfNonZero(instruction->GetLocations()->InAt(0).AsRegister<Register>(),
                                   true_target);
    } else {
      // Condition has not been materialized, use its inputs as the
      // comparison and its condition as the branch condition.
      Primitive::Type type =
          cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
      // Is this a long or FP comparison that has been folded into the HCondition?
      if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
        // Generate the comparison directly.
        GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
                                     true_target, false_target, always_true_target);
        return;
      }

      // Integer case: compare the inputs and branch on the condition code.
      LocationSummary* locations = cond->GetLocations();
      DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
      Register left = locations->InAt(0).AsRegister<Register>();
      Location right = locations->InAt(1);
      if (right.IsRegister()) {
        __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
    }
  }
  if (false_target != nullptr) {
    __ b(false_target);
  }
}
1248
1249void LocationsBuilderARM::VisitIf(HIf* if_instr) {
1250  LocationSummary* locations =
1251      new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
1252  HInstruction* cond = if_instr->InputAt(0);
1253  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
1254    locations->SetInAt(0, Location::RequiresRegister());
1255  }
1256}
1257
1258void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
1259  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
1260  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
1261  Label* always_true_target = true_target;
1262  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1263                                if_instr->IfTrueSuccessor())) {
1264    always_true_target = nullptr;
1265  }
1266  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
1267                                if_instr->IfFalseSuccessor())) {
1268    false_target = nullptr;
1269  }
1270  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
1271}
1272
1273void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1274  LocationSummary* locations = new (GetGraph()->GetArena())
1275      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
1276  HInstruction* cond = deoptimize->InputAt(0);
1277  DCHECK(cond->IsCondition());
1278  if (cond->AsCondition()->NeedsMaterialization()) {
1279    locations->SetInAt(0, Location::RequiresRegister());
1280  }
1281}
1282
1283void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
1284  SlowPathCode* slow_path = new (GetGraph()->GetArena())
1285      DeoptimizationSlowPathARM(deoptimize);
1286  codegen_->AddSlowPath(slow_path);
1287  Label* slow_path_entry = slow_path->GetEntryLabel();
1288  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
1289}
1290
1291void LocationsBuilderARM::VisitCondition(HCondition* cond) {
1292  LocationSummary* locations =
1293      new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
1294  // Handle the long/FP comparisons made in instruction simplification.
1295  switch (cond->InputAt(0)->GetType()) {
1296    case Primitive::kPrimLong:
1297      locations->SetInAt(0, Location::RequiresRegister());
1298      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
1299      if (cond->NeedsMaterialization()) {
1300        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1301      }
1302      break;
1303
1304    case Primitive::kPrimFloat:
1305    case Primitive::kPrimDouble:
1306      locations->SetInAt(0, Location::RequiresFpuRegister());
1307      locations->SetInAt(1, Location::RequiresFpuRegister());
1308      if (cond->NeedsMaterialization()) {
1309        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1310      }
1311      break;
1312
1313    default:
1314      locations->SetInAt(0, Location::RequiresRegister());
1315      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
1316      if (cond->NeedsMaterialization()) {
1317        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1318      }
1319  }
1320}
1321
// Materializes the boolean result of `cond` into its output register. Only
// emits code when materialization is needed; otherwise the user (e.g. HIf)
// emits the comparison itself.
void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
  if (!cond->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = cond->GetLocations();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);
  Register out = locations->Out().AsRegister<Register>();
  Label true_label, false_label;

  switch (cond->InputAt(0)->GetType()) {
    default: {
      // Integer case.
      if (right.IsRegister()) {
        __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
      } else {
        DCHECK(right.IsConstant());
        GenerateCompareWithImmediate(left.AsRegister<Register>(),
                                     CodeGenerator::GetInt32ValueOf(right.GetConstant()));
      }
      // Select 1 or 0 without branches via an IT (if-then-else) block.
      __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
             ARMSignedOrFPCondition(cond->GetCondition()));
      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
             ARMSignedOrFPCondition(cond->GetOppositeCondition()));
      return;
    }
    // Long and FP cases produce jumps that are converted to 0/1 below.
    case Primitive::kPrimLong:
      GenerateLongComparesAndJumps(cond, &true_label, &false_label);
      break;
    case Primitive::kPrimFloat:
      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      GenerateFPJumps(cond, &true_label, &false_label);
      break;
    case Primitive::kPrimDouble:
      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      GenerateFPJumps(cond, &true_label, &false_label);
      break;
  }

  // Convert the jumps into the result.
  Label done_label;

  // False case: result = 0.
  __ Bind(&false_label);
  __ LoadImmediate(out, 0);
  __ b(&done_label);

  // True case: result = 1.
  __ Bind(&true_label);
  __ LoadImmediate(out, 1);
  __ Bind(&done_label);
}
1377
// The concrete comparison visitors below all delegate to VisitCondition,
// which handles materialization and the int/long/FP cases uniformly.
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}
1425
// Locals and local loads generate no code of their own; CodeGeneratorARM::Move
// reads locals from their stack slots on demand.
void LocationsBuilderARM::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLocal(HLocal* local) {
  // Locals may only appear in the entry block (enforced below).
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}
1442
1443void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
1444  LocationSummary* locations =
1445      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
1446  switch (store->InputAt(1)->GetType()) {
1447    case Primitive::kPrimBoolean:
1448    case Primitive::kPrimByte:
1449    case Primitive::kPrimChar:
1450    case Primitive::kPrimShort:
1451    case Primitive::kPrimInt:
1452    case Primitive::kPrimNot:
1453    case Primitive::kPrimFloat:
1454      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
1455      break;
1456
1457    case Primitive::kPrimLong:
1458    case Primitive::kPrimDouble:
1459      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
1460      break;
1461
1462    default:
1463      LOG(FATAL) << "Unexpected local type " << store->InputAt(1)->GetType();
1464  }
1465}
1466
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
  // Nothing to emit: the locations builder constrained the stored value to
  // the local's stack slot.
  UNUSED(store);
}

// Constants produce no code of their own; users materialize them as needed
// (see CodeGeneratorARM::Move).
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}
1525
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}

void LocationsBuilderARM::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  // The return value must already sit in the calling convention's return
  // location, so the code generator only needs to tear down the frame.
  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}

void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
}
1553
void LocationsBuilderARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorARM::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // Emits a call to the runtime's unresolved-invoke trampoline.
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}
1564
1565void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1566  // When we do not run baseline, explicit clinit checks triggered by static
1567  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1568  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1569
1570  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1571                                         codegen_->GetInstructionSetFeatures());
1572  if (intrinsic.TryDispatch(invoke)) {
1573    return;
1574  }
1575
1576  HandleInvoke(invoke);
1577}
1578
1579static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
1580  if (invoke->GetLocations()->Intrinsified()) {
1581    IntrinsicCodeGeneratorARM intrinsic(codegen);
1582    intrinsic.Dispatch(invoke);
1583    return true;
1584  }
1585  return false;
1586}
1587
1588void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
1589  // When we do not run baseline, explicit clinit checks triggered by static
1590  // invokes must have been pruned by art::PrepareForRegisterAllocation.
1591  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
1592
1593  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1594    return;
1595  }
1596
1597  LocationSummary* locations = invoke->GetLocations();
1598  codegen_->GenerateStaticOrDirectCall(
1599      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
1600  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1601}
1602
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
  // Build the common invoke location summary: arguments are placed according
  // to the ARM dex calling convention computed by the visitor.
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}
1607
1608void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1609  IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
1610                                         codegen_->GetInstructionSetFeatures());
1611  if (intrinsic.TryDispatch(invoke)) {
1612    return;
1613  }
1614
1615  HandleInvoke(invoke);
1616}
1617
1618void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
1619  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
1620    return;
1621  }
1622
1623  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
1624  DCHECK(!codegen_->IsLeafMethod());
1625  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
1626}
1627
1628void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
1629  HandleInvoke(invoke);
1630  // Add the hidden argument.
1631  invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
1632}
1633
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
  // Interface call: load the receiver's class, fetch the target method from
  // the class's embedded IMT, then branch to its quick entry point via LR.
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument: temp(1) was pinned to R12 by the locations
  // builder and carries the dex method index of the interface method.
  __ LoadImmediate(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
                   invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    // Receiver was spilled: reload it before dereferencing for the class.
    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  }
  // The class load above doubles as the implicit null check on the receiver,
  // so the PC must be recorded right after it.
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  __ MaybeUnpoisonHeapReference(temp);
  // temp = temp->GetImtEntryAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
1667
1668void LocationsBuilderARM::VisitNeg(HNeg* neg) {
1669  LocationSummary* locations =
1670      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
1671  switch (neg->GetResultType()) {
1672    case Primitive::kPrimInt: {
1673      locations->SetInAt(0, Location::RequiresRegister());
1674      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1675      break;
1676    }
1677    case Primitive::kPrimLong: {
1678      locations->SetInAt(0, Location::RequiresRegister());
1679      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
1680      break;
1681    }
1682
1683    case Primitive::kPrimFloat:
1684    case Primitive::kPrimDouble:
1685      locations->SetInAt(0, Location::RequiresFpuRegister());
1686      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1687      break;
1688
1689    default:
1690      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
1691  }
1692}
1693
void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
  // Emits arithmetic negation (0 - in) for int, long, float and double.
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      __ rsb(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(0));
      break;

    case Primitive::kPrimLong:
      // Note: the locations builder requested kOutputOverlap, so `out` is
      // guaranteed distinct from `in` — required because out.hi is clobbered
      // below before in.hi is read.
      DCHECK(in.IsRegisterPair());
      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
      __ rsbs(out.AsRegisterPairLow<Register>(),
              in.AsRegisterPairLow<Register>(),
              ShifterOperand(0));
      // We cannot emit an RSC (Reverse Subtract with Carry)
      // instruction here, as it does not exist in the Thumb-2
      // instruction set.  We use the following approach
      // using SBC and SUB instead.
      //
      // out.hi = -C
      // (SBC with all three operands equal computes 0 - !carry, i.e. 0 when
      // the low-word subtraction did not borrow and -1 when it did.)
      __ sbc(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(out.AsRegisterPairHigh<Register>()));
      // out.hi = out.hi - in.hi
      __ sub(out.AsRegisterPairHigh<Register>(),
             out.AsRegisterPairHigh<Register>(),
             ShifterOperand(in.AsRegisterPairHigh<Register>()));
      break;

    case Primitive::kPrimFloat:
      DCHECK(in.IsFpuRegister());
      __ vnegs(out.AsFpuRegister<SRegister>(), in.AsFpuRegister<SRegister>());
      break;

    case Primitive::kPrimDouble:
      DCHECK(in.IsFpuRegisterPair());
      __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}
1740
1741void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
1742  Primitive::Type result_type = conversion->GetResultType();
1743  Primitive::Type input_type = conversion->GetInputType();
1744  DCHECK_NE(result_type, input_type);
1745
1746  // The float-to-long, double-to-long and long-to-float type conversions
1747  // rely on a call to the runtime.
1748  LocationSummary::CallKind call_kind =
1749      (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
1750        && result_type == Primitive::kPrimLong)
1751       || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
1752      ? LocationSummary::kCall
1753      : LocationSummary::kNoCall;
1754  LocationSummary* locations =
1755      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
1756
1757  // The Java language does not allow treating boolean as an integral type but
1758  // our bit representation makes it safe.
1759
1760  switch (result_type) {
1761    case Primitive::kPrimByte:
1762      switch (input_type) {
1763        case Primitive::kPrimBoolean:
1764          // Boolean input is a result of code transformations.
1765        case Primitive::kPrimShort:
1766        case Primitive::kPrimInt:
1767        case Primitive::kPrimChar:
1768          // Processing a Dex `int-to-byte' instruction.
1769          locations->SetInAt(0, Location::RequiresRegister());
1770          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1771          break;
1772
1773        default:
1774          LOG(FATAL) << "Unexpected type conversion from " << input_type
1775                     << " to " << result_type;
1776      }
1777      break;
1778
1779    case Primitive::kPrimShort:
1780      switch (input_type) {
1781        case Primitive::kPrimBoolean:
1782          // Boolean input is a result of code transformations.
1783        case Primitive::kPrimByte:
1784        case Primitive::kPrimInt:
1785        case Primitive::kPrimChar:
1786          // Processing a Dex `int-to-short' instruction.
1787          locations->SetInAt(0, Location::RequiresRegister());
1788          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1789          break;
1790
1791        default:
1792          LOG(FATAL) << "Unexpected type conversion from " << input_type
1793                     << " to " << result_type;
1794      }
1795      break;
1796
1797    case Primitive::kPrimInt:
1798      switch (input_type) {
1799        case Primitive::kPrimLong:
1800          // Processing a Dex `long-to-int' instruction.
1801          locations->SetInAt(0, Location::Any());
1802          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1803          break;
1804
1805        case Primitive::kPrimFloat:
1806          // Processing a Dex `float-to-int' instruction.
1807          locations->SetInAt(0, Location::RequiresFpuRegister());
1808          locations->SetOut(Location::RequiresRegister());
1809          locations->AddTemp(Location::RequiresFpuRegister());
1810          break;
1811
1812        case Primitive::kPrimDouble:
1813          // Processing a Dex `double-to-int' instruction.
1814          locations->SetInAt(0, Location::RequiresFpuRegister());
1815          locations->SetOut(Location::RequiresRegister());
1816          locations->AddTemp(Location::RequiresFpuRegister());
1817          break;
1818
1819        default:
1820          LOG(FATAL) << "Unexpected type conversion from " << input_type
1821                     << " to " << result_type;
1822      }
1823      break;
1824
1825    case Primitive::kPrimLong:
1826      switch (input_type) {
1827        case Primitive::kPrimBoolean:
1828          // Boolean input is a result of code transformations.
1829        case Primitive::kPrimByte:
1830        case Primitive::kPrimShort:
1831        case Primitive::kPrimInt:
1832        case Primitive::kPrimChar:
1833          // Processing a Dex `int-to-long' instruction.
1834          locations->SetInAt(0, Location::RequiresRegister());
1835          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1836          break;
1837
1838        case Primitive::kPrimFloat: {
1839          // Processing a Dex `float-to-long' instruction.
1840          InvokeRuntimeCallingConvention calling_convention;
1841          locations->SetInAt(0, Location::FpuRegisterLocation(
1842              calling_convention.GetFpuRegisterAt(0)));
1843          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1844          break;
1845        }
1846
1847        case Primitive::kPrimDouble: {
1848          // Processing a Dex `double-to-long' instruction.
1849          InvokeRuntimeCallingConvention calling_convention;
1850          locations->SetInAt(0, Location::FpuRegisterPairLocation(
1851              calling_convention.GetFpuRegisterAt(0),
1852              calling_convention.GetFpuRegisterAt(1)));
1853          locations->SetOut(Location::RegisterPairLocation(R0, R1));
1854          break;
1855        }
1856
1857        default:
1858          LOG(FATAL) << "Unexpected type conversion from " << input_type
1859                     << " to " << result_type;
1860      }
1861      break;
1862
1863    case Primitive::kPrimChar:
1864      switch (input_type) {
1865        case Primitive::kPrimBoolean:
1866          // Boolean input is a result of code transformations.
1867        case Primitive::kPrimByte:
1868        case Primitive::kPrimShort:
1869        case Primitive::kPrimInt:
1870          // Processing a Dex `int-to-char' instruction.
1871          locations->SetInAt(0, Location::RequiresRegister());
1872          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1873          break;
1874
1875        default:
1876          LOG(FATAL) << "Unexpected type conversion from " << input_type
1877                     << " to " << result_type;
1878      }
1879      break;
1880
1881    case Primitive::kPrimFloat:
1882      switch (input_type) {
1883        case Primitive::kPrimBoolean:
1884          // Boolean input is a result of code transformations.
1885        case Primitive::kPrimByte:
1886        case Primitive::kPrimShort:
1887        case Primitive::kPrimInt:
1888        case Primitive::kPrimChar:
1889          // Processing a Dex `int-to-float' instruction.
1890          locations->SetInAt(0, Location::RequiresRegister());
1891          locations->SetOut(Location::RequiresFpuRegister());
1892          break;
1893
1894        case Primitive::kPrimLong: {
1895          // Processing a Dex `long-to-float' instruction.
1896          InvokeRuntimeCallingConvention calling_convention;
1897          locations->SetInAt(0, Location::RegisterPairLocation(
1898              calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
1899          locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1900          break;
1901        }
1902
1903        case Primitive::kPrimDouble:
1904          // Processing a Dex `double-to-float' instruction.
1905          locations->SetInAt(0, Location::RequiresFpuRegister());
1906          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1907          break;
1908
1909        default:
1910          LOG(FATAL) << "Unexpected type conversion from " << input_type
1911                     << " to " << result_type;
1912      };
1913      break;
1914
1915    case Primitive::kPrimDouble:
1916      switch (input_type) {
1917        case Primitive::kPrimBoolean:
1918          // Boolean input is a result of code transformations.
1919        case Primitive::kPrimByte:
1920        case Primitive::kPrimShort:
1921        case Primitive::kPrimInt:
1922        case Primitive::kPrimChar:
1923          // Processing a Dex `int-to-double' instruction.
1924          locations->SetInAt(0, Location::RequiresRegister());
1925          locations->SetOut(Location::RequiresFpuRegister());
1926          break;
1927
1928        case Primitive::kPrimLong:
1929          // Processing a Dex `long-to-double' instruction.
1930          locations->SetInAt(0, Location::RequiresRegister());
1931          locations->SetOut(Location::RequiresFpuRegister());
1932          locations->AddTemp(Location::RequiresFpuRegister());
1933          locations->AddTemp(Location::RequiresFpuRegister());
1934          break;
1935
1936        case Primitive::kPrimFloat:
1937          // Processing a Dex `float-to-double' instruction.
1938          locations->SetInAt(0, Location::RequiresFpuRegister());
1939          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1940          break;
1941
1942        default:
1943          LOG(FATAL) << "Unexpected type conversion from " << input_type
1944                     << " to " << result_type;
1945      };
1946      break;
1947
1948    default:
1949      LOG(FATAL) << "Unexpected type conversion from " << input_type
1950                 << " to " << result_type;
1951  }
1952}
1953
1954void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
1955  LocationSummary* locations = conversion->GetLocations();
1956  Location out = locations->Out();
1957  Location in = locations->InAt(0);
1958  Primitive::Type result_type = conversion->GetResultType();
1959  Primitive::Type input_type = conversion->GetInputType();
1960  DCHECK_NE(result_type, input_type);
1961  switch (result_type) {
1962    case Primitive::kPrimByte:
1963      switch (input_type) {
1964        case Primitive::kPrimBoolean:
1965          // Boolean input is a result of code transformations.
1966        case Primitive::kPrimShort:
1967        case Primitive::kPrimInt:
1968        case Primitive::kPrimChar:
1969          // Processing a Dex `int-to-byte' instruction.
1970          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 8);
1971          break;
1972
1973        default:
1974          LOG(FATAL) << "Unexpected type conversion from " << input_type
1975                     << " to " << result_type;
1976      }
1977      break;
1978
1979    case Primitive::kPrimShort:
1980      switch (input_type) {
1981        case Primitive::kPrimBoolean:
1982          // Boolean input is a result of code transformations.
1983        case Primitive::kPrimByte:
1984        case Primitive::kPrimInt:
1985        case Primitive::kPrimChar:
1986          // Processing a Dex `int-to-short' instruction.
1987          __ sbfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
1988          break;
1989
1990        default:
1991          LOG(FATAL) << "Unexpected type conversion from " << input_type
1992                     << " to " << result_type;
1993      }
1994      break;
1995
1996    case Primitive::kPrimInt:
1997      switch (input_type) {
1998        case Primitive::kPrimLong:
1999          // Processing a Dex `long-to-int' instruction.
2000          DCHECK(out.IsRegister());
2001          if (in.IsRegisterPair()) {
2002            __ Mov(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
2003          } else if (in.IsDoubleStackSlot()) {
2004            __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), SP, in.GetStackIndex());
2005          } else {
2006            DCHECK(in.IsConstant());
2007            DCHECK(in.GetConstant()->IsLongConstant());
2008            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
2009            __ LoadImmediate(out.AsRegister<Register>(), static_cast<int32_t>(value));
2010          }
2011          break;
2012
2013        case Primitive::kPrimFloat: {
2014          // Processing a Dex `float-to-int' instruction.
2015          SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
2016          __ vmovs(temp, in.AsFpuRegister<SRegister>());
2017          __ vcvtis(temp, temp);
2018          __ vmovrs(out.AsRegister<Register>(), temp);
2019          break;
2020        }
2021
2022        case Primitive::kPrimDouble: {
2023          // Processing a Dex `double-to-int' instruction.
2024          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
2025          DRegister temp_d = FromLowSToD(temp_s);
2026          __ vmovd(temp_d, FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
2027          __ vcvtid(temp_s, temp_d);
2028          __ vmovrs(out.AsRegister<Register>(), temp_s);
2029          break;
2030        }
2031
2032        default:
2033          LOG(FATAL) << "Unexpected type conversion from " << input_type
2034                     << " to " << result_type;
2035      }
2036      break;
2037
2038    case Primitive::kPrimLong:
2039      switch (input_type) {
2040        case Primitive::kPrimBoolean:
2041          // Boolean input is a result of code transformations.
2042        case Primitive::kPrimByte:
2043        case Primitive::kPrimShort:
2044        case Primitive::kPrimInt:
2045        case Primitive::kPrimChar:
2046          // Processing a Dex `int-to-long' instruction.
2047          DCHECK(out.IsRegisterPair());
2048          DCHECK(in.IsRegister());
2049          __ Mov(out.AsRegisterPairLow<Register>(), in.AsRegister<Register>());
2050          // Sign extension.
2051          __ Asr(out.AsRegisterPairHigh<Register>(),
2052                 out.AsRegisterPairLow<Register>(),
2053                 31);
2054          break;
2055
2056        case Primitive::kPrimFloat:
2057          // Processing a Dex `float-to-long' instruction.
2058          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
2059                                  conversion,
2060                                  conversion->GetDexPc(),
2061                                  nullptr);
2062          break;
2063
2064        case Primitive::kPrimDouble:
2065          // Processing a Dex `double-to-long' instruction.
2066          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
2067                                  conversion,
2068                                  conversion->GetDexPc(),
2069                                  nullptr);
2070          break;
2071
2072        default:
2073          LOG(FATAL) << "Unexpected type conversion from " << input_type
2074                     << " to " << result_type;
2075      }
2076      break;
2077
2078    case Primitive::kPrimChar:
2079      switch (input_type) {
2080        case Primitive::kPrimBoolean:
2081          // Boolean input is a result of code transformations.
2082        case Primitive::kPrimByte:
2083        case Primitive::kPrimShort:
2084        case Primitive::kPrimInt:
2085          // Processing a Dex `int-to-char' instruction.
2086          __ ubfx(out.AsRegister<Register>(), in.AsRegister<Register>(), 0, 16);
2087          break;
2088
2089        default:
2090          LOG(FATAL) << "Unexpected type conversion from " << input_type
2091                     << " to " << result_type;
2092      }
2093      break;
2094
2095    case Primitive::kPrimFloat:
2096      switch (input_type) {
2097        case Primitive::kPrimBoolean:
2098          // Boolean input is a result of code transformations.
2099        case Primitive::kPrimByte:
2100        case Primitive::kPrimShort:
2101        case Primitive::kPrimInt:
2102        case Primitive::kPrimChar: {
2103          // Processing a Dex `int-to-float' instruction.
2104          __ vmovsr(out.AsFpuRegister<SRegister>(), in.AsRegister<Register>());
2105          __ vcvtsi(out.AsFpuRegister<SRegister>(), out.AsFpuRegister<SRegister>());
2106          break;
2107        }
2108
2109        case Primitive::kPrimLong:
2110          // Processing a Dex `long-to-float' instruction.
2111          codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
2112                                  conversion,
2113                                  conversion->GetDexPc(),
2114                                  nullptr);
2115          break;
2116
2117        case Primitive::kPrimDouble:
2118          // Processing a Dex `double-to-float' instruction.
2119          __ vcvtsd(out.AsFpuRegister<SRegister>(),
2120                    FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
2121          break;
2122
2123        default:
2124          LOG(FATAL) << "Unexpected type conversion from " << input_type
2125                     << " to " << result_type;
2126      };
2127      break;
2128
2129    case Primitive::kPrimDouble:
2130      switch (input_type) {
2131        case Primitive::kPrimBoolean:
2132          // Boolean input is a result of code transformations.
2133        case Primitive::kPrimByte:
2134        case Primitive::kPrimShort:
2135        case Primitive::kPrimInt:
2136        case Primitive::kPrimChar: {
2137          // Processing a Dex `int-to-double' instruction.
2138          __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.AsRegister<Register>());
2139          __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2140                    out.AsFpuRegisterPairLow<SRegister>());
2141          break;
2142        }
2143
2144        case Primitive::kPrimLong: {
2145          // Processing a Dex `long-to-double' instruction.
2146          Register low = in.AsRegisterPairLow<Register>();
2147          Register high = in.AsRegisterPairHigh<Register>();
2148          SRegister out_s = out.AsFpuRegisterPairLow<SRegister>();
2149          DRegister out_d = FromLowSToD(out_s);
2150          SRegister temp_s = locations->GetTemp(0).AsFpuRegisterPairLow<SRegister>();
2151          DRegister temp_d = FromLowSToD(temp_s);
2152          SRegister constant_s = locations->GetTemp(1).AsFpuRegisterPairLow<SRegister>();
2153          DRegister constant_d = FromLowSToD(constant_s);
2154
2155          // temp_d = int-to-double(high)
2156          __ vmovsr(temp_s, high);
2157          __ vcvtdi(temp_d, temp_s);
2158          // constant_d = k2Pow32EncodingForDouble
2159          __ LoadDImmediate(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
2160          // out_d = unsigned-to-double(low)
2161          __ vmovsr(out_s, low);
2162          __ vcvtdu(out_d, out_s);
2163          // out_d += temp_d * constant_d
2164          __ vmlad(out_d, temp_d, constant_d);
2165          break;
2166        }
2167
2168        case Primitive::kPrimFloat:
2169          // Processing a Dex `float-to-double' instruction.
2170          __ vcvtds(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2171                    in.AsFpuRegister<SRegister>());
2172          break;
2173
2174        default:
2175          LOG(FATAL) << "Unexpected type conversion from " << input_type
2176                     << " to " << result_type;
2177      };
2178      break;
2179
2180    default:
2181      LOG(FATAL) << "Unexpected type conversion from " << input_type
2182                 << " to " << result_type;
2183  }
2184}
2185
2186void LocationsBuilderARM::VisitAdd(HAdd* add) {
2187  LocationSummary* locations =
2188      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
2189  switch (add->GetResultType()) {
2190    case Primitive::kPrimInt: {
2191      locations->SetInAt(0, Location::RequiresRegister());
2192      locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
2193      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2194      break;
2195    }
2196
2197    case Primitive::kPrimLong: {
2198      locations->SetInAt(0, Location::RequiresRegister());
2199      locations->SetInAt(1, Location::RequiresRegister());
2200      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2201      break;
2202    }
2203
2204    case Primitive::kPrimFloat:
2205    case Primitive::kPrimDouble: {
2206      locations->SetInAt(0, Location::RequiresFpuRegister());
2207      locations->SetInAt(1, Location::RequiresFpuRegister());
2208      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2209      break;
2210    }
2211
2212    default:
2213      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2214  }
2215}
2216
2217void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
2218  LocationSummary* locations = add->GetLocations();
2219  Location out = locations->Out();
2220  Location first = locations->InAt(0);
2221  Location second = locations->InAt(1);
2222  switch (add->GetResultType()) {
2223    case Primitive::kPrimInt:
2224      if (second.IsRegister()) {
2225        __ add(out.AsRegister<Register>(),
2226               first.AsRegister<Register>(),
2227               ShifterOperand(second.AsRegister<Register>()));
2228      } else {
2229        __ AddConstant(out.AsRegister<Register>(),
2230                       first.AsRegister<Register>(),
2231                       second.GetConstant()->AsIntConstant()->GetValue());
2232      }
2233      break;
2234
2235    case Primitive::kPrimLong: {
2236      DCHECK(second.IsRegisterPair());
2237      __ adds(out.AsRegisterPairLow<Register>(),
2238              first.AsRegisterPairLow<Register>(),
2239              ShifterOperand(second.AsRegisterPairLow<Register>()));
2240      __ adc(out.AsRegisterPairHigh<Register>(),
2241             first.AsRegisterPairHigh<Register>(),
2242             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2243      break;
2244    }
2245
2246    case Primitive::kPrimFloat:
2247      __ vadds(out.AsFpuRegister<SRegister>(),
2248               first.AsFpuRegister<SRegister>(),
2249               second.AsFpuRegister<SRegister>());
2250      break;
2251
2252    case Primitive::kPrimDouble:
2253      __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2254               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2255               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2256      break;
2257
2258    default:
2259      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
2260  }
2261}
2262
2263void LocationsBuilderARM::VisitSub(HSub* sub) {
2264  LocationSummary* locations =
2265      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
2266  switch (sub->GetResultType()) {
2267    case Primitive::kPrimInt: {
2268      locations->SetInAt(0, Location::RequiresRegister());
2269      locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
2270      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2271      break;
2272    }
2273
2274    case Primitive::kPrimLong: {
2275      locations->SetInAt(0, Location::RequiresRegister());
2276      locations->SetInAt(1, Location::RequiresRegister());
2277      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2278      break;
2279    }
2280    case Primitive::kPrimFloat:
2281    case Primitive::kPrimDouble: {
2282      locations->SetInAt(0, Location::RequiresFpuRegister());
2283      locations->SetInAt(1, Location::RequiresFpuRegister());
2284      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2285      break;
2286    }
2287    default:
2288      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2289  }
2290}
2291
2292void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
2293  LocationSummary* locations = sub->GetLocations();
2294  Location out = locations->Out();
2295  Location first = locations->InAt(0);
2296  Location second = locations->InAt(1);
2297  switch (sub->GetResultType()) {
2298    case Primitive::kPrimInt: {
2299      if (second.IsRegister()) {
2300        __ sub(out.AsRegister<Register>(),
2301               first.AsRegister<Register>(),
2302               ShifterOperand(second.AsRegister<Register>()));
2303      } else {
2304        __ AddConstant(out.AsRegister<Register>(),
2305                       first.AsRegister<Register>(),
2306                       -second.GetConstant()->AsIntConstant()->GetValue());
2307      }
2308      break;
2309    }
2310
2311    case Primitive::kPrimLong: {
2312      DCHECK(second.IsRegisterPair());
2313      __ subs(out.AsRegisterPairLow<Register>(),
2314              first.AsRegisterPairLow<Register>(),
2315              ShifterOperand(second.AsRegisterPairLow<Register>()));
2316      __ sbc(out.AsRegisterPairHigh<Register>(),
2317             first.AsRegisterPairHigh<Register>(),
2318             ShifterOperand(second.AsRegisterPairHigh<Register>()));
2319      break;
2320    }
2321
2322    case Primitive::kPrimFloat: {
2323      __ vsubs(out.AsFpuRegister<SRegister>(),
2324               first.AsFpuRegister<SRegister>(),
2325               second.AsFpuRegister<SRegister>());
2326      break;
2327    }
2328
2329    case Primitive::kPrimDouble: {
2330      __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
2331               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
2332               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
2333      break;
2334    }
2335
2336
2337    default:
2338      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
2339  }
2340}
2341
2342void LocationsBuilderARM::VisitMul(HMul* mul) {
2343  LocationSummary* locations =
2344      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2345  switch (mul->GetResultType()) {
2346    case Primitive::kPrimInt:
2347    case Primitive::kPrimLong:  {
2348      locations->SetInAt(0, Location::RequiresRegister());
2349      locations->SetInAt(1, Location::RequiresRegister());
2350      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2351      break;
2352    }
2353
2354    case Primitive::kPrimFloat:
2355    case Primitive::kPrimDouble: {
2356      locations->SetInAt(0, Location::RequiresFpuRegister());
2357      locations->SetInAt(1, Location::RequiresFpuRegister());
2358      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2359      break;
2360    }
2361
2362    default:
2363      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2364  }
2365}
2366
// Emits code for an HMul. Int and floating-point multiplies are single
// instructions; the 64-bit case is expanded into a mul/mla/umull/add
// sequence (see the formula comments inside).
void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      __ mul(out.AsRegister<Register>(),
             first.AsRegister<Register>(),
             second.AsRegister<Register>());
      break;
    }
    case Primitive::kPrimLong: {
      Register out_hi = out.AsRegisterPairHigh<Register>();
      Register out_lo = out.AsRegisterPairLow<Register>();
      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Register in2_hi = second.AsRegisterPairHigh<Register>();
      Register in2_lo = second.AsRegisterPairLow<Register>();

      // Extra checks to protect caused by the existence of R1_R2.
      // The algorithm is wrong if out.hi is either in1.lo or in2.lo:
      // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2);
      DCHECK_NE(out_hi, in1_lo);
      DCHECK_NE(out_hi, in2_lo);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: out
      // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: out.lo = (in1.lo * in2.lo)[31:0]

      // IP serves as the scratch register for the cross terms below.
      // IP <- in1.lo * in2.hi
      __ mul(IP, in1_lo, in2_hi);
      // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ mla(out_hi, in1_hi, in2_lo, IP);
      // out.lo <- (in1.lo * in2.lo)[31:0];
      __ umull(out_lo, IP, in1_lo, in2_lo);
      // out.hi <- in2.hi * in1.lo +  in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ add(out_hi, out_hi, ShifterOperand(IP));
      break;
    }

    case Primitive::kPrimFloat: {
      __ vmuls(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}
2428
2429void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2430  DCHECK(instruction->IsDiv() || instruction->IsRem());
2431  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2432
2433  LocationSummary* locations = instruction->GetLocations();
2434  Location second = locations->InAt(1);
2435  DCHECK(second.IsConstant());
2436
2437  Register out = locations->Out().AsRegister<Register>();
2438  Register dividend = locations->InAt(0).AsRegister<Register>();
2439  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2440  DCHECK(imm == 1 || imm == -1);
2441
2442  if (instruction->IsRem()) {
2443    __ LoadImmediate(out, 0);
2444  } else {
2445    if (imm == 1) {
2446      __ Mov(out, dividend);
2447    } else {
2448      __ rsb(out, dividend, ShifterOperand(0));
2449    }
2450  }
2451}
2452
// Emits code for an int div/rem whose divisor has a power-of-two magnitude.
// Uses the standard shift-based expansion: add a rounding bias derived from
// the dividend's sign so that the arithmetic shift truncates toward zero.
void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
  uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  // temp <- (dividend < 0) ? abs_imm - 1 : 0, the rounding bias.
  if (ctz_imm == 1) {
    // For |imm| == 2 the bias is just the sign bit.
    __ Lsr(temp, dividend, 32 - ctz_imm);
  } else {
    __ Asr(temp, dividend, 31);
    __ Lsr(temp, temp, 32 - ctz_imm);
  }
  __ add(out, temp, ShifterOperand(dividend));

  if (instruction->IsDiv()) {
    // Quotient: shift the biased dividend; negate if the divisor is negative.
    __ Asr(out, out, ctz_imm);
    if (imm < 0) {
      __ rsb(out, out, ShifterOperand(0));
    }
  } else {
    // Remainder: keep the low ctz_imm bits of the biased dividend, then
    // remove the bias again. The sign of the divisor does not matter.
    __ ubfx(out, out, 0, ctz_imm);
    __ sub(out, out, ShifterOperand(temp));
  }
}
2487
// Emits code for an int div/rem by an arbitrary non-trivial constant using
// multiplication by a precomputed "magic" reciprocal (Hacker's Delight
// style): quotient = hi32(dividend * magic) adjusted by sign corrections,
// then shifted; remainder is reconstructed as dividend - quotient * imm.
void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  Register out = locations->Out().AsRegister<Register>();
  Register dividend = locations->InAt(0).AsRegister<Register>();
  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
  int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);

  // temp1 <- hi32(dividend * magic); temp2 receives the discarded low half.
  __ LoadImmediate(temp1, magic);
  __ smull(temp2, temp1, dividend, temp1);

  // Sign corrections required when the magic constant's sign disagrees with
  // the divisor's sign.
  if (imm > 0 && magic < 0) {
    __ add(temp1, temp1, ShifterOperand(dividend));
  } else if (imm < 0 && magic > 0) {
    __ sub(temp1, temp1, ShifterOperand(dividend));
  }

  if (shift != 0) {
    __ Asr(temp1, temp1, shift);
  }

  if (instruction->IsDiv()) {
    // Add 1 when the intermediate result is negative (rounds toward zero).
    __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
  } else {
    __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
    // TODO: Strength reduction for mls.
    // out <- dividend - quotient * imm.
    __ LoadImmediate(temp2, imm);
    __ mls(out, temp1, temp2, dividend);
  }
}
2528
2529void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
2530  DCHECK(instruction->IsDiv() || instruction->IsRem());
2531  DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
2532
2533  LocationSummary* locations = instruction->GetLocations();
2534  Location second = locations->InAt(1);
2535  DCHECK(second.IsConstant());
2536
2537  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
2538  if (imm == 0) {
2539    // Do not generate anything. DivZeroCheck would prevent any code to be executed.
2540  } else if (imm == 1 || imm == -1) {
2541    DivRemOneOrMinusOne(instruction);
2542  } else if (IsPowerOfTwo(std::abs(imm))) {
2543    DivRemByPowerOfTwo(instruction);
2544  } else {
2545    DCHECK(imm <= -2 || imm >= 2);
2546    GenerateDivRemWithAnyConstant(instruction);
2547  }
2548}
2549
// Chooses register locations for an HDiv. Long division and (when sdiv is
// unavailable) int division go through runtime calls and therefore need the
// runtime calling-convention registers; other cases stay register-based.
void LocationsBuilderARM::VisitDiv(HDiv* div) {
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if (div->GetResultType() == Primitive::kPrimLong) {
    // pLdiv runtime call.
    call_kind = LocationSummary::kCall;
  } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
    // sdiv will be replaced by other instruction sequence.
  } else if (div->GetResultType() == Primitive::kPrimInt &&
             !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
    // pIdivmod runtime call.
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (div->InputAt(1)->IsConstant()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
        // Temps mirror what the constant-divisor expansions consume:
        // one temp for the power-of-two path, two for the magic-number path.
        int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
        if (abs_imm <= 1) {
          // No temp register required.
        } else {
          locations->AddTemp(Location::RequiresRegister());
          if (!IsPowerOfTwo(abs_imm)) {
            locations->AddTemp(Location::RequiresRegister());
          }
        }
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::RequiresRegister());
        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      } else {
        InvokeRuntimeCallingConvention calling_convention;
        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
        //       we only need the former.
        locations->SetOut(Location::RegisterLocation(R0));
      }
      break;
    }
    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
      locations->SetInAt(1, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
      locations->SetOut(Location::RegisterPairLocation(R0, R1));
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2615
// Emits code for an HDiv. The int case has three strategies (constant
// divisor expansion, hardware sdiv, or the pIdivmod runtime call); long
// always calls pLdiv; float/double use single VFP instructions.
void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsConstant()) {
        GenerateDivRemConstantIntegral(div);
      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
        __ sdiv(out.AsRegister<Register>(),
                first.AsRegister<Register>(),
                second.AsRegister<Register>());
      } else {
        // The DCHECKs verify the fixed register assignment chosen by
        // LocationsBuilderARM::VisitDiv for the runtime call.
        InvokeRuntimeCallingConvention calling_convention;
        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
        DCHECK_EQ(R0, out.AsRegister<Register>());

        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());

      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
      break;
    }

    case Primitive::kPrimFloat: {
      __ vdivs(out.AsFpuRegister<SRegister>(),
               first.AsFpuRegister<SRegister>(),
               second.AsFpuRegister<SRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}
2672
2673void LocationsBuilderARM::VisitRem(HRem* rem) {
2674  Primitive::Type type = rem->GetResultType();
2675
2676  // Most remainders are implemented in the runtime.
2677  LocationSummary::CallKind call_kind = LocationSummary::kCall;
2678  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
2679    // sdiv will be replaced by other instruction sequence.
2680    call_kind = LocationSummary::kNoCall;
2681  } else if ((rem->GetResultType() == Primitive::kPrimInt)
2682             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2683    // Have hardware divide instruction for int, do it with three instructions.
2684    call_kind = LocationSummary::kNoCall;
2685  }
2686
2687  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2688
2689  switch (type) {
2690    case Primitive::kPrimInt: {
2691      if (rem->InputAt(1)->IsConstant()) {
2692        locations->SetInAt(0, Location::RequiresRegister());
2693        locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
2694        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2695        int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
2696        if (abs_imm <= 1) {
2697          // No temp register required.
2698        } else {
2699          locations->AddTemp(Location::RequiresRegister());
2700          if (!IsPowerOfTwo(abs_imm)) {
2701            locations->AddTemp(Location::RequiresRegister());
2702          }
2703        }
2704      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2705        locations->SetInAt(0, Location::RequiresRegister());
2706        locations->SetInAt(1, Location::RequiresRegister());
2707        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2708        locations->AddTemp(Location::RequiresRegister());
2709      } else {
2710        InvokeRuntimeCallingConvention calling_convention;
2711        locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2712        locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2713        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
2714        //       we only need the latter.
2715        locations->SetOut(Location::RegisterLocation(R1));
2716      }
2717      break;
2718    }
2719    case Primitive::kPrimLong: {
2720      InvokeRuntimeCallingConvention calling_convention;
2721      locations->SetInAt(0, Location::RegisterPairLocation(
2722          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
2723      locations->SetInAt(1, Location::RegisterPairLocation(
2724          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
2725      // The runtime helper puts the output in R2,R3.
2726      locations->SetOut(Location::RegisterPairLocation(R2, R3));
2727      break;
2728    }
2729    case Primitive::kPrimFloat: {
2730      InvokeRuntimeCallingConvention calling_convention;
2731      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2732      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2733      locations->SetOut(Location::FpuRegisterLocation(S0));
2734      break;
2735    }
2736
2737    case Primitive::kPrimDouble: {
2738      InvokeRuntimeCallingConvention calling_convention;
2739      locations->SetInAt(0, Location::FpuRegisterPairLocation(
2740          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
2741      locations->SetInAt(1, Location::FpuRegisterPairLocation(
2742          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
2743      locations->SetOut(Location::Location::FpuRegisterPairLocation(S0, S1));
2744      break;
2745    }
2746
2747    default:
2748      LOG(FATAL) << "Unexpected rem type " << type;
2749  }
2750}
2751
2752void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
2753  LocationSummary* locations = rem->GetLocations();
2754  Location out = locations->Out();
2755  Location first = locations->InAt(0);
2756  Location second = locations->InAt(1);
2757
2758  Primitive::Type type = rem->GetResultType();
2759  switch (type) {
2760    case Primitive::kPrimInt: {
2761        if (second.IsConstant()) {
2762          GenerateDivRemConstantIntegral(rem);
2763        } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
2764        Register reg1 = first.AsRegister<Register>();
2765        Register reg2 = second.AsRegister<Register>();
2766        Register temp = locations->GetTemp(0).AsRegister<Register>();
2767
2768        // temp = reg1 / reg2  (integer division)
2769        // dest = reg1 - temp * reg2
2770        __ sdiv(temp, reg1, reg2);
2771        __ mls(out.AsRegister<Register>(), temp, reg2, reg1);
2772      } else {
2773        InvokeRuntimeCallingConvention calling_convention;
2774        DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
2775        DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
2776        DCHECK_EQ(R1, out.AsRegister<Register>());
2777
2778        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
2779      }
2780      break;
2781    }
2782
2783    case Primitive::kPrimLong: {
2784      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
2785      break;
2786    }
2787
2788    case Primitive::kPrimFloat: {
2789      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
2790      break;
2791    }
2792
2793    case Primitive::kPrimDouble: {
2794      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
2795      break;
2796    }
2797
2798    default:
2799      LOG(FATAL) << "Unexpected rem type " << type;
2800  }
2801}
2802
2803void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2804  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2805      ? LocationSummary::kCallOnSlowPath
2806      : LocationSummary::kNoCall;
2807  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2808  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2809  if (instruction->HasUses()) {
2810    locations->SetOut(Location::SameAsFirstInput());
2811  }
2812}
2813
2814void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2815  SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
2816  codegen_->AddSlowPath(slow_path);
2817
2818  LocationSummary* locations = instruction->GetLocations();
2819  Location value = locations->InAt(0);
2820
2821  switch (instruction->GetType()) {
2822    case Primitive::kPrimByte:
2823    case Primitive::kPrimChar:
2824    case Primitive::kPrimShort:
2825    case Primitive::kPrimInt: {
2826      if (value.IsRegister()) {
2827        __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
2828      } else {
2829        DCHECK(value.IsConstant()) << value;
2830        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
2831          __ b(slow_path->GetEntryLabel());
2832        }
2833      }
2834      break;
2835    }
2836    case Primitive::kPrimLong: {
2837      if (value.IsRegisterPair()) {
2838        __ orrs(IP,
2839                value.AsRegisterPairLow<Register>(),
2840                ShifterOperand(value.AsRegisterPairHigh<Register>()));
2841        __ b(slow_path->GetEntryLabel(), EQ);
2842      } else {
2843        DCHECK(value.IsConstant()) << value;
2844        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
2845          __ b(slow_path->GetEntryLabel());
2846        }
2847      }
2848      break;
2849    default:
2850      LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
2851    }
2852  }
2853}
2854
2855void LocationsBuilderARM::HandleShift(HBinaryOperation* op) {
2856  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
2857
2858  LocationSummary* locations =
2859      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
2860
2861  switch (op->GetResultType()) {
2862    case Primitive::kPrimInt: {
2863      locations->SetInAt(0, Location::RequiresRegister());
2864      locations->SetInAt(1, Location::RegisterOrConstant(op->InputAt(1)));
2865      // Make the output overlap, as it will be used to hold the masked
2866      // second input.
2867      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2868      break;
2869    }
2870    case Primitive::kPrimLong: {
2871      locations->SetInAt(0, Location::RequiresRegister());
2872      locations->SetInAt(1, Location::RequiresRegister());
2873      locations->AddTemp(Location::RequiresRegister());
2874      locations->SetOut(Location::RequiresRegister());
2875      break;
2876    }
2877    default:
2878      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
2879  }
2880}
2881
// Emits code for Shl/Shr/UShr. The shift amount is masked first (ARM does
// not mask it in hardware the way Java requires). The 64-bit sequences
// combine two 32-bit shifts and patch the result with a predicated
// instruction when the masked amount is >= 32; they rely on the exact
// ordering of the `subs`/`it` flag-setting pairs below.
void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  Primitive::Type type = op->GetResultType();
  switch (type) {
    case Primitive::kPrimInt: {
      Register out_reg = out.AsRegister<Register>();
      Register first_reg = first.AsRegister<Register>();
      // Arm doesn't mask the shift count so we need to do it ourselves.
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        // out_reg doubles as the masked shift amount (locations give it
        // kOutputOverlap for this reason).
        __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue));
        if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, out_reg);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, out_reg);
        } else {
          __ Lsr(out_reg, first_reg, out_reg);
        }
      } else {
        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
        uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue);
        if (shift_value == 0) {  // arm does not support shifting with 0 immediate.
          __ Mov(out_reg, first_reg);
        } else if (op->IsShl()) {
          __ Lsl(out_reg, first_reg, shift_value);
        } else if (op->IsShr()) {
          __ Asr(out_reg, first_reg, shift_value);
        } else {
          __ Lsr(out_reg, first_reg, shift_value);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register o_h = out.AsRegisterPairHigh<Register>();
      Register o_l = out.AsRegisterPairLow<Register>();

      Register temp = locations->GetTemp(0).AsRegister<Register>();

      Register high = first.AsRegisterPairHigh<Register>();
      Register low = first.AsRegisterPairLow<Register>();

      Register second_reg = second.AsRegister<Register>();

      if (op->IsShl()) {
        // The masked amount is kept in o_l until the final low-part shift.
        __ and_(o_l, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the high part
        __ Lsl(o_h, high, o_l);
        // Shift the low part and `or` what overflew on the high part
        __ rsb(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ Lsr(temp, low, temp);
        __ orr(o_h, o_h, ShifterOperand(temp));
        // If the shift is > 32 bits, override the high part
        __ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsl(o_h, low, temp, PL);
        // Shift the low part
        __ Lsl(o_l, low, o_l);
      } else if (op->IsShr()) {
        // The masked amount is kept in o_h until the final high-part shift.
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // Shift the low part
        __ Lsr(o_l, low, o_h);
        // Shift the high part and `or` what underflew on the low part
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        // If the shift is > 32 bits, override the low part
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Asr(o_l, high, temp, PL);
        // Shift the high part
        __ Asr(o_h, high, o_h);
      } else {
        __ and_(o_h, second_reg, ShifterOperand(kMaxLongShiftValue));
        // same as Shr except we use `Lsr`s and not `Asr`s
        __ Lsr(o_l, low, o_h);
        __ rsb(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ Lsl(temp, high, temp);
        __ orr(o_l, o_l, ShifterOperand(temp));
        __ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
        __ it(PL);
        __ Lsr(o_l, high, temp, PL);
        __ Lsr(o_h, high, o_h);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected operation type " << type;
  }
}
2978
// The three shift instructions share the HandleShift implementations.
void LocationsBuilderARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorARM::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorARM::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorARM::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}
3002
3003void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
3004  LocationSummary* locations =
3005      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3006  InvokeRuntimeCallingConvention calling_convention;
3007  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3008  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3009  locations->SetOut(Location::RegisterLocation(R0));
3010}
3011
3012void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
3013  InvokeRuntimeCallingConvention calling_convention;
3014  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
3015  // Note: if heap poisoning is enabled, the entry point takes cares
3016  // of poisoning the reference.
3017  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3018                          instruction,
3019                          instruction->GetDexPc(),
3020                          nullptr);
3021}
3022
3023void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
3024  LocationSummary* locations =
3025      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3026  InvokeRuntimeCallingConvention calling_convention;
3027  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3028  locations->SetOut(Location::RegisterLocation(R0));
3029  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3030  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3031}
3032
3033void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
3034  InvokeRuntimeCallingConvention calling_convention;
3035  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
3036  // Note: if heap poisoning is enabled, the entry point takes cares
3037  // of poisoning the reference.
3038  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
3039                          instruction,
3040                          instruction->GetDexPc(),
3041                          nullptr);
3042}
3043
3044void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
3045  LocationSummary* locations =
3046      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3047  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
3048  if (location.IsStackSlot()) {
3049    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3050  } else if (location.IsDoubleStackSlot()) {
3051    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
3052  }
3053  locations->SetOut(location);
3054}
3055
// No code is emitted for a parameter value; the locations builder already
// placed it where callers expect it.
void InstructionCodeGeneratorARM::VisitParameterValue(
    HParameterValue* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}
3060
// The current ArtMethod* is always available in the fixed method register
// (R0, see kMethodRegisterArgument), so just expose that register.
void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
3066
// No code needed: the method pointer already sits in the register chosen by
// the locations builder above.
void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}
3070
3071void LocationsBuilderARM::VisitNot(HNot* not_) {
3072  LocationSummary* locations =
3073      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
3074  locations->SetInAt(0, Location::RequiresRegister());
3075  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3076}
3077
3078void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
3079  LocationSummary* locations = not_->GetLocations();
3080  Location out = locations->Out();
3081  Location in = locations->InAt(0);
3082  switch (not_->GetResultType()) {
3083    case Primitive::kPrimInt:
3084      __ mvn(out.AsRegister<Register>(), ShifterOperand(in.AsRegister<Register>()));
3085      break;
3086
3087    case Primitive::kPrimLong:
3088      __ mvn(out.AsRegisterPairLow<Register>(),
3089             ShifterOperand(in.AsRegisterPairLow<Register>()));
3090      __ mvn(out.AsRegisterPairHigh<Register>(),
3091             ShifterOperand(in.AsRegisterPairHigh<Register>()));
3092      break;
3093
3094    default:
3095      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
3096  }
3097}
3098
3099void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
3100  LocationSummary* locations =
3101      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
3102  locations->SetInAt(0, Location::RequiresRegister());
3103  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3104}
3105
3106void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
3107  LocationSummary* locations = bool_not->GetLocations();
3108  Location out = locations->Out();
3109  Location in = locations->InAt(0);
3110  __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
3111}
3112
3113void LocationsBuilderARM::VisitCompare(HCompare* compare) {
3114  LocationSummary* locations =
3115      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
3116  switch (compare->InputAt(0)->GetType()) {
3117    case Primitive::kPrimLong: {
3118      locations->SetInAt(0, Location::RequiresRegister());
3119      locations->SetInAt(1, Location::RequiresRegister());
3120      // Output overlaps because it is written before doing the low comparison.
3121      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3122      break;
3123    }
3124    case Primitive::kPrimFloat:
3125    case Primitive::kPrimDouble: {
3126      locations->SetInAt(0, Location::RequiresFpuRegister());
3127      locations->SetInAt(1, Location::RequiresFpuRegister());
3128      locations->SetOut(Location::RequiresRegister());
3129      break;
3130    }
3131    default:
3132      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
3133  }
3134}
3135
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
  // Produces -1, 0 or 1 in `out` for left <, ==, > right.
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  Primitive::Type type = compare->InputAt(0)->GetType();
  switch (type) {
    case Primitive::kPrimLong: {
      // Compare high words first (signed); only if they are equal does the
      // unsigned low-word comparison decide the result.
      __ cmp(left.AsRegisterPairHigh<Register>(),
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
      __ b(&less, LT);
      __ b(&greater, GT);
      // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
      __ LoadImmediate(out, 0);
      __ cmp(left.AsRegisterPairLow<Register>(),
             ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      // Set `out` to 0 up front (cannot affect the FP compare below), then let
      // the shared EQ/LO tail overwrite it for the non-equal cases.
      __ LoadImmediate(out, 0);
      if (type == Primitive::kPrimFloat) {
        __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
      } else {
        __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
                 FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
      }
      __ vmstat();  // transfer FP status register to ARM APSR.
      // Unordered (NaN) operands set V; the bias decides whether NaN compares
      // as greater or less.
      __ b(compare->IsGtBias() ? &greater : &less, VS);  // VS for unordered.
      break;
    }
    default:
      LOG(FATAL) << "Unexpected compare type " << type;
  }
  // Shared tail: flags were set by the last cmp/vmstat above.
  __ b(&done, EQ);
  __ b(&less, LO);  // LO is for both: unsigned compare for longs and 'less than' for floats.

  __ Bind(&greater);
  __ LoadImmediate(out, 1);
  __ b(&done);

  __ Bind(&less);
  __ LoadImmediate(out, -1);

  __ Bind(&done);
}
3184
3185void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
3186  LocationSummary* locations =
3187      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3188  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
3189    locations->SetInAt(i, Location::Any());
3190  }
3191  locations->SetOut(Location::Any());
3192}
3193
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
  // Phis never generate code; they are resolved before code generation, so
  // reaching this visitor is a compiler bug.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3198
3199void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) {
3200  // TODO (ported from quick): revisit Arm barrier kinds
3201  DmbOptions flavor = DmbOptions::ISH;  // quiet c++ warnings
3202  switch (kind) {
3203    case MemBarrierKind::kAnyStore:
3204    case MemBarrierKind::kLoadAny:
3205    case MemBarrierKind::kAnyAny: {
3206      flavor = DmbOptions::ISH;
3207      break;
3208    }
3209    case MemBarrierKind::kStoreStore: {
3210      flavor = DmbOptions::ISHST;
3211      break;
3212    }
3213    default:
3214      LOG(FATAL) << "Unexpected memory barrier " << kind;
3215  }
3216  __ dmb(flavor);
3217}
3218
void InstructionCodeGeneratorARM::GenerateWideAtomicLoad(Register addr,
                                                         uint32_t offset,
                                                         Register out_lo,
                                                         Register out_hi) {
  // Atomically loads a 64-bit value with LDREXD. LDREXD takes no immediate
  // offset, so a non-zero offset must be folded into the address first.
  if (offset != 0) {
    // `out_lo` is safe to use as a scratch register here: it is overwritten by
    // the ldrexd below anyway.
    __ LoadImmediate(out_lo, offset);
    __ add(IP, addr, ShifterOperand(out_lo));
    addr = IP;
  }
  __ ldrexd(out_lo, out_hi, addr);
}
3230
void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
                                                          uint32_t offset,
                                                          Register value_lo,
                                                          Register value_hi,
                                                          Register temp1,
                                                          Register temp2,
                                                          HInstruction* instruction) {
  // Atomically stores a 64-bit value using an LDREXD/STREXD retry loop.
  Label fail;
  if (offset != 0) {
    // strexd has no immediate-offset form; fold the offset into IP. `temp1` is
    // only a scratch here; it is clobbered again by the ldrexd below.
    __ LoadImmediate(temp1, offset);
    __ add(IP, addr, ShifterOperand(temp1));
    addr = IP;
  }
  __ Bind(&fail);
  // We need a load followed by store. (The address used in a STREX instruction must
  // be the same as the address in the most recently executed LDREX instruction.)
  __ ldrexd(temp1, temp2, addr);
  // The ldrexd is the first access through `addr`, so record the PC here for a
  // possible implicit null check.
  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // strexd writes 0 to temp1 on success, non-zero if the exclusive monitor was
  // lost; retry until the store succeeds.
  __ strexd(temp1, value_lo, value_hi, addr);
  __ CompareAndBranchIfNonZero(temp1, &fail);
}
3252
3253void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
3254  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
3255
3256  LocationSummary* locations =
3257      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3258  locations->SetInAt(0, Location::RequiresRegister());
3259
3260  Primitive::Type field_type = field_info.GetFieldType();
3261  if (Primitive::IsFloatingPointType(field_type)) {
3262    locations->SetInAt(1, Location::RequiresFpuRegister());
3263  } else {
3264    locations->SetInAt(1, Location::RequiresRegister());
3265  }
3266
3267  bool is_wide = field_type == Primitive::kPrimLong || field_type == Primitive::kPrimDouble;
3268  bool generate_volatile = field_info.IsVolatile()
3269      && is_wide
3270      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3271  bool needs_write_barrier =
3272      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
3273  // Temporary registers for the write barrier.
3274  // TODO: consider renaming StoreNeedsWriteBarrier to StoreNeedsGCMark.
3275  if (needs_write_barrier) {
3276    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
3277    locations->AddTemp(Location::RequiresRegister());
3278  } else if (generate_volatile) {
3279    // Arm encoding have some additional constraints for ldrexd/strexd:
3280    // - registers need to be consecutive
3281    // - the first register should be even but not R14.
3282    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
3283    // enable Arm encoding.
3284    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
3285
3286    locations->AddTemp(Location::RequiresRegister());
3287    locations->AddTemp(Location::RequiresRegister());
3288    if (field_type == Primitive::kPrimDouble) {
3289      // For doubles we need two more registers to copy the value.
3290      locations->AddTemp(Location::RegisterLocation(R2));
3291      locations->AddTemp(Location::RegisterLocation(R3));
3292    }
3293  }
3294}
3295
3296void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
3297                                                 const FieldInfo& field_info,
3298                                                 bool value_can_be_null) {
3299  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
3300
3301  LocationSummary* locations = instruction->GetLocations();
3302  Register base = locations->InAt(0).AsRegister<Register>();
3303  Location value = locations->InAt(1);
3304
3305  bool is_volatile = field_info.IsVolatile();
3306  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
3307  Primitive::Type field_type = field_info.GetFieldType();
3308  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
3309  bool needs_write_barrier =
3310      CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
3311
3312  if (is_volatile) {
3313    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
3314  }
3315
3316  switch (field_type) {
3317    case Primitive::kPrimBoolean:
3318    case Primitive::kPrimByte: {
3319      __ StoreToOffset(kStoreByte, value.AsRegister<Register>(), base, offset);
3320      break;
3321    }
3322
3323    case Primitive::kPrimShort:
3324    case Primitive::kPrimChar: {
3325      __ StoreToOffset(kStoreHalfword, value.AsRegister<Register>(), base, offset);
3326      break;
3327    }
3328
3329    case Primitive::kPrimInt:
3330    case Primitive::kPrimNot: {
3331      if (kPoisonHeapReferences && needs_write_barrier) {
3332        // Note that in the case where `value` is a null reference,
3333        // we do not enter this block, as a null reference does not
3334        // need poisoning.
3335        DCHECK_EQ(field_type, Primitive::kPrimNot);
3336        Register temp = locations->GetTemp(0).AsRegister<Register>();
3337        __ Mov(temp, value.AsRegister<Register>());
3338        __ PoisonHeapReference(temp);
3339        __ StoreToOffset(kStoreWord, temp, base, offset);
3340      } else {
3341        __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
3342      }
3343      break;
3344    }
3345
3346    case Primitive::kPrimLong: {
3347      if (is_volatile && !atomic_ldrd_strd) {
3348        GenerateWideAtomicStore(base, offset,
3349                                value.AsRegisterPairLow<Register>(),
3350                                value.AsRegisterPairHigh<Register>(),
3351                                locations->GetTemp(0).AsRegister<Register>(),
3352                                locations->GetTemp(1).AsRegister<Register>(),
3353                                instruction);
3354      } else {
3355        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
3356        codegen_->MaybeRecordImplicitNullCheck(instruction);
3357      }
3358      break;
3359    }
3360
3361    case Primitive::kPrimFloat: {
3362      __ StoreSToOffset(value.AsFpuRegister<SRegister>(), base, offset);
3363      break;
3364    }
3365
3366    case Primitive::kPrimDouble: {
3367      DRegister value_reg = FromLowSToD(value.AsFpuRegisterPairLow<SRegister>());
3368      if (is_volatile && !atomic_ldrd_strd) {
3369        Register value_reg_lo = locations->GetTemp(0).AsRegister<Register>();
3370        Register value_reg_hi = locations->GetTemp(1).AsRegister<Register>();
3371
3372        __ vmovrrd(value_reg_lo, value_reg_hi, value_reg);
3373
3374        GenerateWideAtomicStore(base, offset,
3375                                value_reg_lo,
3376                                value_reg_hi,
3377                                locations->GetTemp(2).AsRegister<Register>(),
3378                                locations->GetTemp(3).AsRegister<Register>(),
3379                                instruction);
3380      } else {
3381        __ StoreDToOffset(value_reg, base, offset);
3382        codegen_->MaybeRecordImplicitNullCheck(instruction);
3383      }
3384      break;
3385    }
3386
3387    case Primitive::kPrimVoid:
3388      LOG(FATAL) << "Unreachable type " << field_type;
3389      UNREACHABLE();
3390  }
3391
3392  // Longs and doubles are handled in the switch.
3393  if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
3394    codegen_->MaybeRecordImplicitNullCheck(instruction);
3395  }
3396
3397  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
3398    Register temp = locations->GetTemp(0).AsRegister<Register>();
3399    Register card = locations->GetTemp(1).AsRegister<Register>();
3400    codegen_->MarkGCCard(
3401        temp, card, base, value.AsRegister<Register>(), value_can_be_null);
3402  }
3403
3404  if (is_volatile) {
3405    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
3406  }
3407}
3408
void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  // Sets up locations for a (possibly volatile) field load.
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());

  // A volatile double without atomic ldrd needs the ldrexd sequence plus two
  // core temps to receive the value before moving it to the FP register.
  bool volatile_for_double = field_info.IsVolatile()
      && (field_info.GetFieldType() == Primitive::kPrimDouble)
      && !codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  // A volatile long loads straight into the output pair; the output must not
  // share registers with the base, because GenerateWideAtomicLoad clobbers the
  // low output register (as scratch) before the address is consumed.
  bool overlap = field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong);

  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(),
                      (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  }
  if (volatile_for_double) {
    // Arm encoding have some additional constraints for ldrexd/strexd:
    // - registers need to be consecutive
    // - the first register should be even but not R14.
    // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever
    // enable Arm encoding.
    DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet());
    locations->AddTemp(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresRegister());
  }
}
3437
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  // Emits a (possibly volatile) load of the field at `base` + offset,
  // including any required memory barrier and heap-reference unpoisoning.
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  bool atomic_ldrd_strd = codegen_->GetInstructionSetFeatures().HasAtomicLdrdAndStrd();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimByte: {
      __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimShort: {
      __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimChar: {
      __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset);
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile && !atomic_ldrd_strd) {
        // No single atomic 64-bit load available: use the ldrexd sequence.
        GenerateWideAtomicLoad(base, offset,
                               out.AsRegisterPairLow<Register>(),
                               out.AsRegisterPairHigh<Register>());
      } else {
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset);
      break;
    }

    case Primitive::kPrimDouble: {
      DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>());
      if (is_volatile && !atomic_ldrd_strd) {
        // Atomic load into two core temps, then move the pair into the
        // FP register.
        Register lo = locations->GetTemp(0).AsRegister<Register>();
        Register hi = locations->GetTemp(1).AsRegister<Register>();
        GenerateWideAtomicLoad(base, offset, lo, hi);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ vmovdrr(out_reg, lo, hi);
      } else {
        __ LoadDFromOffset(out_reg, base, offset);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Doubles are handled in the switch.
  if (field_type != Primitive::kPrimDouble) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    // Volatile load: order the load before subsequent memory operations.
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }

  if (field_type == Primitive::kPrimNot) {
    __ MaybeUnpoisonHeapReference(out.AsRegister<Register>());
  }
}
3526
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Instance and static field stores share the common HandleFieldSet path.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3530
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  // Delegate to the shared field-store path, forwarding the nullability hint
  // used by the write barrier.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3534
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Instance and static field loads share the common HandleFieldGet path.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3538
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  // Delegate to the shared field-load path.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3542
void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Static field loads share the common HandleFieldGet path.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3546
void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  // Delegate to the shared field-load path.
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}
3550
void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Static field stores share the common HandleFieldSet path.
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}
3554
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  // Delegate to the shared field-store path, forwarding the nullability hint
  // used by the write barrier.
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
3558
3559void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
3560  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
3561      ? LocationSummary::kCallOnSlowPath
3562      : LocationSummary::kNoCall;
3563  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3564  locations->SetInAt(0, Location::RequiresRegister());
3565  if (instruction->HasUses()) {
3566    locations->SetOut(Location::SameAsFirstInput());
3567  }
3568}
3569
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
  // If a later user of the checked value performs the first dereference, no
  // code is needed here at all.
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  // Load from offset 0 of the object into the scratch register IP: a null
  // `obj` faults here, and the recorded PC lets the runtime attribute that
  // fault to this instruction.
  __ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
3579
3580void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
3581  SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
3582  codegen_->AddSlowPath(slow_path);
3583
3584  LocationSummary* locations = instruction->GetLocations();
3585  Location obj = locations->InAt(0);
3586
3587  __ CompareAndBranchIfZero(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
3588}
3589
3590void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
3591  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
3592    GenerateImplicitNullCheck(instruction);
3593  } else {
3594    GenerateExplicitNullCheck(instruction);
3595  }
3596}
3597
3598void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
3599  LocationSummary* locations =
3600      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3601  locations->SetInAt(0, Location::RequiresRegister());
3602  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3603  if (Primitive::IsFloatingPointType(instruction->GetType())) {
3604    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3605  } else {
3606    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3607  }
3608}
3609
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
  // Emits the load of one array element. For a constant index the full offset
  // is folded into the addressing; for a register index the scaled index is
  // first added to the array pointer in IP.
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        // Byte-sized elements: no shift needed on the index.
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
        __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        // Scale the index by the element size (2 bytes).
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
        __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      // References are loaded like ints; unpoisoning happens after the switch.
      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
                    "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes.");
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadFromOffset(kLoadWord, out, IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        // kLoadWordPair: only the low register of the pair is named.
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
        __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      Location out = locations->Out();
      DCHECK(out.IsFpuRegisterPair());
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset);
      } else {
        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
        __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  // The load above is the first access through `obj`; record the PC for a
  // possible implicit null check.
  codegen_->MaybeRecordImplicitNullCheck(instruction);

  if (type == Primitive::kPrimNot) {
    Register out = locations->Out().AsRegister<Register>();
    __ MaybeUnpoisonHeapReference(out);
  }
}
3743
3744void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
3745  Primitive::Type value_type = instruction->GetComponentType();
3746
3747  bool needs_write_barrier =
3748      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3749  bool needs_runtime_call = instruction->NeedsTypeCheck();
3750
3751  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
3752      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
3753  if (needs_runtime_call) {
3754    InvokeRuntimeCallingConvention calling_convention;
3755    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3756    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
3757    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
3758  } else {
3759    locations->SetInAt(0, Location::RequiresRegister());
3760    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
3761    if (Primitive::IsFloatingPointType(value_type)) {
3762      locations->SetInAt(2, Location::RequiresFpuRegister());
3763    } else {
3764      locations->SetInAt(2, Location::RequiresRegister());
3765    }
3766
3767    if (needs_write_barrier) {
3768      // Temporary registers for the write barrier.
3769      locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
3770      locations->AddTemp(Location::RequiresRegister());
3771    }
3772  }
3773}
3774
3775void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
3776  LocationSummary* locations = instruction->GetLocations();
3777  Register obj = locations->InAt(0).AsRegister<Register>();
3778  Location index = locations->InAt(1);
3779  Primitive::Type value_type = instruction->GetComponentType();
3780  bool needs_runtime_call = locations->WillCall();
3781  bool needs_write_barrier =
3782      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
3783
3784  switch (value_type) {
3785    case Primitive::kPrimBoolean:
3786    case Primitive::kPrimByte: {
3787      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
3788      Register value = locations->InAt(2).AsRegister<Register>();
3789      if (index.IsConstant()) {
3790        size_t offset =
3791            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
3792        __ StoreToOffset(kStoreByte, value, obj, offset);
3793      } else {
3794        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
3795        __ StoreToOffset(kStoreByte, value, IP, data_offset);
3796      }
3797      break;
3798    }
3799
3800    case Primitive::kPrimShort:
3801    case Primitive::kPrimChar: {
3802      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
3803      Register value = locations->InAt(2).AsRegister<Register>();
3804      if (index.IsConstant()) {
3805        size_t offset =
3806            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
3807        __ StoreToOffset(kStoreHalfword, value, obj, offset);
3808      } else {
3809        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
3810        __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
3811      }
3812      break;
3813    }
3814
3815    case Primitive::kPrimInt:
3816    case Primitive::kPrimNot: {
3817      if (!needs_runtime_call) {
3818        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
3819        Register value = locations->InAt(2).AsRegister<Register>();
3820        Register source = value;
3821        if (kPoisonHeapReferences && needs_write_barrier) {
3822          // Note that in the case where `value` is a null reference,
3823          // we do not enter this block, as a null reference does not
3824          // need poisoning.
3825          DCHECK_EQ(value_type, Primitive::kPrimNot);
3826          Register temp = locations->GetTemp(0).AsRegister<Register>();
3827          __ Mov(temp, value);
3828          __ PoisonHeapReference(temp);
3829          source = temp;
3830        }
3831        if (index.IsConstant()) {
3832          size_t offset =
3833              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
3834          __ StoreToOffset(kStoreWord, source, obj, offset);
3835        } else {
3836          DCHECK(index.IsRegister()) << index;
3837          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
3838          __ StoreToOffset(kStoreWord, source, IP, data_offset);
3839        }
3840        codegen_->MaybeRecordImplicitNullCheck(instruction);
3841        if (needs_write_barrier) {
3842          DCHECK_EQ(value_type, Primitive::kPrimNot);
3843          Register temp = locations->GetTemp(0).AsRegister<Register>();
3844          Register card = locations->GetTemp(1).AsRegister<Register>();
3845          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
3846        }
3847      } else {
3848        DCHECK_EQ(value_type, Primitive::kPrimNot);
3849        // Note: if heap poisoning is enabled, pAputObject takes cares
3850        // of poisoning the reference.
3851        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
3852                                instruction,
3853                                instruction->GetDexPc(),
3854                                nullptr);
3855      }
3856      break;
3857    }
3858
3859    case Primitive::kPrimLong: {
3860      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
3861      Location value = locations->InAt(2);
3862      if (index.IsConstant()) {
3863        size_t offset =
3864            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
3865        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
3866      } else {
3867        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
3868        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
3869      }
3870      break;
3871    }
3872
3873    case Primitive::kPrimFloat: {
3874      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
3875      Location value = locations->InAt(2);
3876      DCHECK(value.IsFpuRegister());
3877      if (index.IsConstant()) {
3878        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
3879        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
3880      } else {
3881        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
3882        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
3883      }
3884      break;
3885    }
3886
3887    case Primitive::kPrimDouble: {
3888      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
3889      Location value = locations->InAt(2);
3890      DCHECK(value.IsFpuRegisterPair());
3891      if (index.IsConstant()) {
3892        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
3893        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
3894      } else {
3895        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
3896        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
3897      }
3898
3899      break;
3900    }
3901
3902    case Primitive::kPrimVoid:
3903      LOG(FATAL) << "Unreachable type " << value_type;
3904      UNREACHABLE();
3905  }
3906
3907  // Ints and objects are handled in the switch.
3908  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
3909    codegen_->MaybeRecordImplicitNullCheck(instruction);
3910  }
3911}
3912
3913void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
3914  LocationSummary* locations =
3915      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3916  locations->SetInAt(0, Location::RequiresRegister());
3917  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3918}
3919
3920void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
3921  LocationSummary* locations = instruction->GetLocations();
3922  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
3923  Register obj = locations->InAt(0).AsRegister<Register>();
3924  Register out = locations->Out().AsRegister<Register>();
3925  __ LoadFromOffset(kLoadWord, out, obj, offset);
3926  codegen_->MaybeRecordImplicitNullCheck(instruction);
3927}
3928
3929void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3930  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
3931      ? LocationSummary::kCallOnSlowPath
3932      : LocationSummary::kNoCall;
3933  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3934  locations->SetInAt(0, Location::RequiresRegister());
3935  locations->SetInAt(1, Location::RequiresRegister());
3936  if (instruction->HasUses()) {
3937    locations->SetOut(Location::SameAsFirstInput());
3938  }
3939}
3940
3941void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
3942  LocationSummary* locations = instruction->GetLocations();
3943  SlowPathCode* slow_path =
3944      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
3945  codegen_->AddSlowPath(slow_path);
3946
3947  Register index = locations->InAt(0).AsRegister<Register>();
3948  Register length = locations->InAt(1).AsRegister<Register>();
3949
3950  __ cmp(index, ShifterOperand(length));
3951  __ b(slow_path->GetEntryLabel(), HS);
3952}
3953
3954void CodeGeneratorARM::MarkGCCard(Register temp,
3955                                  Register card,
3956                                  Register object,
3957                                  Register value,
3958                                  bool can_be_null) {
3959  Label is_null;
3960  if (can_be_null) {
3961    __ CompareAndBranchIfZero(value, &is_null);
3962  }
3963  __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
3964  __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
3965  __ strb(card, Address(card, temp));
3966  if (can_be_null) {
3967    __ Bind(&is_null);
3968  }
3969}
3970
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
  // Temporaries carry no location summary; the code generator manages them.
  temp->SetLocations(nullptr);
}
3974
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  // No code is emitted for a temporary itself.
  UNUSED(temp);
}
3979
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
  // Parallel moves never reach the locations builder.
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}
3984
void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) {
  // Delegate to the parallel move resolver, which emits the actual moves.
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
3988
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  // A suspend check needs no registers, only a slow-path call kind.
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
3992
// Emits a suspend check unless it is subsumed by other generated code: loop
// suspend checks are emitted at the back edge, and the entry-block check is
// folded into the following goto.
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}
4006
// Emits a suspend check: loads the thread's flags halfword and branches to a
// suspend slow path when any flag is set. With a non-null `successor` (the
// back-edge case) the fast path jumps to the successor block; otherwise
// execution resumes right after the check via the slow path's return label.
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  // Reuse a slow path already attached to this instruction, if any; otherwise
  // create one and register it.
  SuspendCheckSlowPathARM* slow_path =
      down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
  if (slow_path == nullptr) {
    slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
    instruction->SetSlowPath(slow_path);
    codegen_->AddSlowPath(slow_path);
    if (successor != nullptr) {
      DCHECK(successor->IsLoopHeader());
      codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
    }
  } else {
    // A cached slow path must have been created for the same successor.
    DCHECK_EQ(slow_path->GetSuccessor(), successor);
  }

  __ LoadFromOffset(
      kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
  if (successor == nullptr) {
    __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ CompareAndBranchIfZero(IP, codegen_->GetLabelOf(successor));
    __ b(slow_path->GetEntryLabel());
  }
}
4033
// Returns the ARM assembler shared with the owning code generator.
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
  return codegen_->GetAssembler();
}
4037
// Emits the machine code for the move at `index` in the resolver's move list,
// dispatching on the (source kind, destination kind) pair. IP and DTMP serve
// as scratch registers for memory-to-memory moves; unexpected pairings are
// caught by the DCHECKs in each branch.
void ParallelMoveResolverARM::EmitMove(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    // Core register -> core register or 32-bit stack slot.
    if (destination.IsRegister()) {
      __ Mov(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(),
                       SP, destination.GetStackIndex());
    }
  } else if (source.IsStackSlot()) {
    // 32-bit stack slot -> core register, S register, or stack slot (via IP).
    if (destination.IsRegister()) {
      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(),
                        SP, source.GetStackIndex());
    } else if (destination.IsFpuRegister()) {
      __ LoadSFromOffset(destination.AsFpuRegister<SRegister>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsStackSlot());
      __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
      __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegister()) {
    // S register -> S register or 32-bit stack slot.
    if (destination.IsFpuRegister()) {
      __ vmovs(destination.AsFpuRegister<SRegister>(), source.AsFpuRegister<SRegister>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ StoreSToOffset(source.AsFpuRegister<SRegister>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsDoubleStackSlot()) {
    // 64-bit stack slot -> 64-bit stack slot (via DTMP), core register pair,
    // or D register.
    if (destination.IsDoubleStackSlot()) {
      __ LoadDFromOffset(DTMP, SP, source.GetStackIndex());
      __ StoreDToOffset(DTMP, SP, destination.GetStackIndex());
    } else if (destination.IsRegisterPair()) {
      DCHECK(ExpectedPairLayout(destination));
      __ LoadFromOffset(
          kLoadWordPair, destination.AsRegisterPairLow<Register>(), SP, source.GetStackIndex());
    } else {
      DCHECK(destination.IsFpuRegisterPair()) << destination;
      __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
    }
  } else if (source.IsRegisterPair()) {
    // Core register pair -> core register pair or 64-bit stack slot.
    if (destination.IsRegisterPair()) {
      __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
      __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      DCHECK(ExpectedPairLayout(source));
      __ StoreToOffset(
          kStoreWordPair, source.AsRegisterPairLow<Register>(), SP, destination.GetStackIndex());
    }
  } else if (source.IsFpuRegisterPair()) {
    // D register -> D register or 64-bit stack slot.
    if (destination.IsFpuRegisterPair()) {
      __ vmovd(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
               FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
    } else {
      DCHECK(destination.IsDoubleStackSlot()) << destination;
      __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
                        SP,
                        destination.GetStackIndex());
    }
  } else {
    // Constant -> register(s) or stack slot(s), materialized as immediates.
    DCHECK(source.IsConstant()) << source;
    HConstant* constant = source.GetConstant();
    if (constant->IsIntConstant() || constant->IsNullConstant()) {
      int32_t value = CodeGenerator::GetInt32ValueOf(constant);
      if (destination.IsRegister()) {
        __ LoadImmediate(destination.AsRegister<Register>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, value);
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    } else if (constant->IsLongConstant()) {
      int64_t value = constant->AsLongConstant()->GetValue();
      if (destination.IsRegisterPair()) {
        __ LoadImmediate(destination.AsRegisterPairLow<Register>(), Low32Bits(value));
        __ LoadImmediate(destination.AsRegisterPairHigh<Register>(), High32Bits(value));
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Store the two 32-bit halves through IP.
        __ LoadImmediate(IP, Low32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else if (constant->IsDoubleConstant()) {
      double value = constant->AsDoubleConstant()->GetValue();
      if (destination.IsFpuRegisterPair()) {
        __ LoadDImmediate(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()), value);
      } else {
        DCHECK(destination.IsDoubleStackSlot()) << destination;
        // Reinterpret the double's bits and store the two 32-bit halves.
        uint64_t int_value = bit_cast<uint64_t, double>(value);
        __ LoadImmediate(IP, Low32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
        __ LoadImmediate(IP, High32Bits(int_value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
      }
    } else {
      DCHECK(constant->IsFloatConstant()) << constant->DebugName();
      float value = constant->AsFloatConstant()->GetValue();
      if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.AsFpuRegister<SRegister>(), value);
      } else {
        DCHECK(destination.IsStackSlot());
        __ LoadImmediate(IP, bit_cast<int32_t, float>(value));
        __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
      }
    }
  }
}
4153
// Swaps core register `reg` with the 32-bit stack slot at SP + `mem`,
// using IP as the scratch register.
void ParallelMoveResolverARM::Exchange(Register reg, int mem) {
  __ Mov(IP, reg);
  __ LoadFromOffset(kLoadWord, reg, SP, mem);
  __ StoreToOffset(kStoreWord, IP, SP, mem);
}
4159
// Swaps the 32-bit stack slots at SP + `mem1` and SP + `mem2`, using IP plus
// one additional scratch core register.
void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch(this, IP, R0, codegen_->GetNumberOfCoreRegisters());
  // If the scratch register had to be spilled, SP moved down one word, so the
  // slot offsets must be adjusted by that amount.
  int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0;
  __ LoadFromOffset(kLoadWord, static_cast<Register>(ensure_scratch.GetRegister()),
                    SP, mem1 + stack_offset);
  __ LoadFromOffset(kLoadWord, IP, SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, static_cast<Register>(ensure_scratch.GetRegister()),
                   SP, mem2 + stack_offset);
  __ StoreToOffset(kStoreWord, IP, SP, mem1 + stack_offset);
}
4170
// Emits the machine code for the swap at `index` in the resolver's move list,
// dispatching on the two location kinds. IP (core) and DTMP (FP) are the
// scratch registers; unimplemented pairings abort.
void ParallelMoveResolverARM::EmitSwap(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    // Core register <-> core register, bouncing one value through IP.
    DCHECK_NE(source.AsRegister<Register>(), IP);
    DCHECK_NE(destination.AsRegister<Register>(), IP);
    __ Mov(IP, source.AsRegister<Register>());
    __ Mov(source.AsRegister<Register>(), destination.AsRegister<Register>());
    __ Mov(destination.AsRegister<Register>(), IP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    // S register <-> S register, bouncing one value through IP.
    __ vmovrs(IP, source.AsFpuRegister<SRegister>());
    __ vmovs(source.AsFpuRegister<SRegister>(), destination.AsFpuRegister<SRegister>());
    __ vmovsr(destination.AsFpuRegister<SRegister>(), IP);
  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
    // Core pair <-> core pair, parking one pair in DTMP.
    __ vmovdrr(DTMP, source.AsRegisterPairLow<Register>(), source.AsRegisterPairHigh<Register>());
    __ Mov(source.AsRegisterPairLow<Register>(), destination.AsRegisterPairLow<Register>());
    __ Mov(source.AsRegisterPairHigh<Register>(), destination.AsRegisterPairHigh<Register>());
    __ vmovrrd(destination.AsRegisterPairLow<Register>(),
               destination.AsRegisterPairHigh<Register>(),
               DTMP);
  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
    // Core pair <-> 64-bit stack slot; relies on the pair being (even, even+1).
    Register low_reg = source.IsRegisterPair()
        ? source.AsRegisterPairLow<Register>()
        : destination.AsRegisterPairLow<Register>();
    int mem = source.IsRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
    __ vmovdrr(DTMP, low_reg, static_cast<Register>(low_reg + 1));
    __ LoadFromOffset(kLoadWordPair, low_reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
    // D register <-> D register via DTMP.
    DRegister first = FromLowSToD(source.AsFpuRegisterPairLow<SRegister>());
    DRegister second = FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    __ vmovd(DTMP, first);
    __ vmovd(first, second);
    __ vmovd(second, DTMP);
  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
    // D register <-> 64-bit stack slot.
    DRegister reg = source.IsFpuRegisterPair()
        ? FromLowSToD(source.AsFpuRegisterPairLow<SRegister>())
        : FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>());
    int mem = source.IsFpuRegisterPair()
        ? destination.GetStackIndex()
        : source.GetStackIndex();
    __ vmovd(DTMP, reg);
    __ LoadDFromOffset(reg, SP, mem);
    __ StoreDToOffset(DTMP, SP, mem);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // S register <-> 32-bit stack slot.
    SRegister reg = source.IsFpuRegister() ? source.AsFpuRegister<SRegister>()
                                           : destination.AsFpuRegister<SRegister>();
    int mem = source.IsFpuRegister()
        ? destination.GetStackIndex()
        : source.GetStackIndex();

    __ vmovrs(IP, reg);
    __ LoadSFromOffset(reg, SP, mem);
    __ StoreToOffset(kStoreWord, IP, SP, mem);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    // 64-bit stack slot <-> 64-bit stack slot, one 32-bit word at a time.
    Exchange(source.GetStackIndex(), destination.GetStackIndex());
    Exchange(source.GetHighStackIndex(kArmWordSize), destination.GetHighStackIndex(kArmWordSize));
  } else {
    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
  }
}
4244
// Saves a scratch core register on the stack before the resolver reuses it.
void ParallelMoveResolverARM::SpillScratch(int reg) {
  __ Push(static_cast<Register>(reg));
}
4248
// Restores a scratch core register previously saved by SpillScratch.
void ParallelMoveResolverARM::RestoreScratch(int reg) {
  __ Pop(static_cast<Register>(reg));
}
4252
4253void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
4254  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
4255      ? LocationSummary::kCallOnSlowPath
4256      : LocationSummary::kNoCall;
4257  LocationSummary* locations =
4258      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
4259  locations->SetInAt(0, Location::RequiresRegister());
4260  locations->SetOut(Location::RequiresRegister());
4261}
4262
// Generates the load of a class object into `out`. The referrer's own class
// is read directly from the current ArtMethod; any other class is read from
// the method's dex cache of resolved types, with a slow path covering the
// unresolved (null) case and, when required, class initialization.
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Register current_method = locations->InAt(0).AsRegister<Register>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(
        kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    // current method -> resolved-types cache -> class at the type index.
    __ LoadFromOffset(kLoadWord,
                      out,
                      current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.

    // The slow path resolves the class if the cache entry is null, and also
    // handles initialization when MustGenerateClinitCheck() is set.
    SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}
4292
4293void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
4294  LocationSummary* locations =
4295      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
4296  locations->SetInAt(0, Location::RequiresRegister());
4297  if (check->HasUses()) {
4298    locations->SetOut(Location::SameAsFirstInput());
4299  }
4300}
4301
4302void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
4303  // We assume the class is not null.
4304  SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
4305      check->GetLoadClass(), check, check->GetDexPc(), true);
4306  codegen_->AddSlowPath(slow_path);
4307  GenerateClassInitializationCheck(slow_path,
4308                                   check->GetLocations()->InAt(0).AsRegister<Register>());
4309}
4310
// Emits the status check on `class_reg`: branches to `slow_path` when the
// class status is below kStatusInitialized, and issues a memory fence on the
// fast path. Clobbers IP.
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
    SlowPathCode* slow_path, Register class_reg) {
  __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
  __ b(slow_path->GetEntryLabel(), LT);
  // Even if the initialized flag is set, we may be in a situation where caches are not synced
  // properly. Therefore, we do a memory fence.
  __ dmb(ISH);
  __ Bind(slow_path->GetExitLabel());
}
4321
4322void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
4323  LocationSummary* locations =
4324      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
4325  locations->SetInAt(0, Location::RequiresRegister());
4326  locations->SetOut(Location::RequiresRegister());
4327}
4328
4329void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
4330  SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
4331  codegen_->AddSlowPath(slow_path);
4332
4333  LocationSummary* locations = load->GetLocations();
4334  Register out = locations->Out().AsRegister<Register>();
4335  Register current_method = locations->InAt(0).AsRegister<Register>();
4336  __ LoadFromOffset(
4337      kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
4338  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
4339  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
4340  // TODO: We will need a read barrier here.
4341  __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
4342  __ Bind(slow_path->GetExitLabel());
4343}
4344
// Offset of the thread-local pending-exception field, shared by the
// LoadException and ClearException code generators.
static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
}
4348
4349void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
4350  LocationSummary* locations =
4351      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
4352  locations->SetOut(Location::RequiresRegister());
4353}
4354
4355void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
4356  Register out = load->GetLocations()->Out().AsRegister<Register>();
4357  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
4358}
4359
void LocationsBuilderARM::VisitClearException(HClearException* clear) {
  // Clearing the exception needs no registers.
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}
4363
void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  // Store null into the thread-local pending-exception slot.
  __ LoadImmediate(IP, 0);
  __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
}
4368
4369void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
4370  LocationSummary* locations =
4371      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4372  InvokeRuntimeCallingConvention calling_convention;
4373  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4374}
4375
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
  // Delegate to the pDeliverException runtime entrypoint.
  codegen_->InvokeRuntime(
      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
}
4380
// Allocates locations for `instanceof`. Most check kinds are inlined and work
// on plain registers; interface checks go straight to the runtime, and array
// checks keep a slow path for the hard cases.
void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  switch (instruction->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kAbstractClassCheck:
    case TypeCheckKind::kClassHierarchyCheck:
    case TypeCheckKind::kArrayObjectCheck:
      call_kind = LocationSummary::kNoCall;
      break;
    case TypeCheckKind::kInterfaceCheck:
      call_kind = LocationSummary::kCall;
      break;
    case TypeCheckKind::kArrayCheck:
      call_kind = LocationSummary::kCallOnSlowPath;
      break;
  }
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  if (call_kind != LocationSummary::kCall) {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RequiresRegister());
    // The out register is used as a temporary, so it overlaps with the inputs.
    // Note that TypeCheckSlowPathARM uses this register too.
    locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
  } else {
    // Runtime call: pin the inputs to the entrypoint's argument registers
    // (note the swapped order) and the result to R0.
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetOut(Location::RegisterLocation(R0));
  }
}
4411
// Generates `instanceof`: sets `out` to 1 when `obj` is an instance of `cls`,
// 0 otherwise. The code emitted depends on the statically-known check kind;
// `out` doubles as the working register that walks the class metadata.
void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
  Label done, zero;
  SlowPathCode* slow_path = nullptr;

  // Return 0 if `obj` is null.
  // avoid null check if we know obj is not null.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &zero);
  }

  // In case of an interface check, we put the object class into the object register.
  // This is safe, as the register is caller-save, and the object must be in another
  // register if it survives the runtime call.
  Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
      ? obj
      : out;
  __ LoadFromOffset(kLoadWord, target, obj, class_offset);
  __ MaybeUnpoisonHeapReference(target);

  switch (instruction->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck: {
      __ cmp(out, ShifterOperand(cls));
      // Classes must be equal for the instanceof to succeed.
      __ b(&zero, NE);
      __ LoadImmediate(out, 1);
      __ b(&done);
      break;
    }
    case TypeCheckKind::kAbstractClassCheck: {
      // If the class is abstract, we eagerly fetch the super class of the
      // object to avoid doing a comparison we know will fail.
      Label loop;
      __ Bind(&loop);
      __ LoadFromOffset(kLoadWord, out, out, super_offset);
      __ MaybeUnpoisonHeapReference(out);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ CompareAndBranchIfZero(out, &done);
      __ cmp(out, ShifterOperand(cls));
      __ b(&loop, NE);
      __ LoadImmediate(out, 1);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
    case TypeCheckKind::kClassHierarchyCheck: {
      // Walk over the class hierarchy to find a match.
      Label loop, success;
      __ Bind(&loop);
      __ cmp(out, ShifterOperand(cls));
      __ b(&success, EQ);
      __ LoadFromOffset(kLoadWord, out, out, super_offset);
      __ MaybeUnpoisonHeapReference(out);
      __ CompareAndBranchIfNonZero(out, &loop);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ b(&done);
      __ Bind(&success);
      __ LoadImmediate(out, 1);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
    case TypeCheckKind::kArrayObjectCheck: {
      // Do an exact check.
      Label exact_check;
      __ cmp(out, ShifterOperand(cls));
      __ b(&exact_check, EQ);
      // Otherwise, we need to check that the object's class is a non primitive array.
      __ LoadFromOffset(kLoadWord, out, out, component_offset);
      __ MaybeUnpoisonHeapReference(out);
      // If `out` is null, we use it for the result, and jump to `done`.
      __ CompareAndBranchIfZero(out, &done);
      __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
      __ CompareAndBranchIfNonZero(out, &zero);
      __ Bind(&exact_check);
      __ LoadImmediate(out, 1);
      __ b(&done);
      break;
    }
    case TypeCheckKind::kArrayCheck: {
      // Compare against `cls`; a mismatch is resolved by the slow path.
      __ cmp(out, ShifterOperand(cls));
      DCHECK(locations->OnlyCallsOnSlowPath());
      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
          instruction, /* is_fatal */ false);
      codegen_->AddSlowPath(slow_path);
      __ b(slow_path->GetEntryLabel(), NE);
      __ LoadImmediate(out, 1);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }

    case TypeCheckKind::kInterfaceCheck:
    default: {
      // Fall back to the runtime; the result lands in the `out` location (R0).
      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                              instruction,
                              instruction->GetDexPc(),
                              nullptr);
      if (zero.IsLinked()) {
        __ b(&done);
      }
      break;
    }
  }

  // Shared "result is 0" tail, only emitted when some branch targets it.
  if (zero.IsLinked()) {
    __ Bind(&zero);
    __ LoadImmediate(out, 0);
  }

  if (done.IsLinked()) {
    __ Bind(&done);
  }

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}
4541
4542void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
4543  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
4544  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
4545
4546  switch (instruction->GetTypeCheckKind()) {
4547    case TypeCheckKind::kExactCheck:
4548    case TypeCheckKind::kAbstractClassCheck:
4549    case TypeCheckKind::kClassHierarchyCheck:
4550    case TypeCheckKind::kArrayObjectCheck:
4551      call_kind = throws_into_catch
4552          ? LocationSummary::kCallOnSlowPath
4553          : LocationSummary::kNoCall;
4554      break;
4555    case TypeCheckKind::kInterfaceCheck:
4556      call_kind = LocationSummary::kCall;
4557      break;
4558    case TypeCheckKind::kArrayCheck:
4559      call_kind = LocationSummary::kCallOnSlowPath;
4560      break;
4561  }
4562
4563  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
4564      instruction, call_kind);
4565  if (call_kind != LocationSummary::kCall) {
4566    locations->SetInAt(0, Location::RequiresRegister());
4567    locations->SetInAt(1, Location::RequiresRegister());
4568    // Note that TypeCheckSlowPathARM uses this register too.
4569    locations->AddTemp(Location::RequiresRegister());
4570  } else {
4571    InvokeRuntimeCallingConvention calling_convention;
4572    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4573    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
4574  }
4575}
4576
// Generates the checked cast: verifies that the object in input 0 (if
// non-null) is compatible with the class in input 1. On success execution
// falls through; on failure the exception is thrown via the slow path or
// the pCheckCast runtime entrypoint.
void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register cls = locations->InAt(1).AsRegister<Register>();
  // No temp register is allocated when the check is lowered to a runtime call.
  Register temp = locations->WillCall()
      ? Register(kNoRegister)
      : locations->GetTemp(0).AsRegister<Register>();

  // Field offsets used while walking the class / component-type hierarchy.
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
  SlowPathCode* slow_path = nullptr;

  if (!locations->WillCall()) {
    // The second constructor argument is `is_fatal`: the slow path does not
    // return when the location summary reports that no call is possible.
    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
        instruction, !locations->CanCall());
    codegen_->AddSlowPath(slow_path);
  }

  Label done;
  // Avoid null check if we know obj is not null. A null object skips the
  // whole check (branch to `done`), i.e. null passes any cast.
  if (instruction->MustDoNullCheck()) {
    __ CompareAndBranchIfZero(obj, &done);
  }

  // Load the object's class. On the runtime-call path the class overwrites
  // `obj` in place, so the entrypoint receives the two classes to compare.
  if (locations->WillCall()) {
    __ LoadFromOffset(kLoadWord, obj, obj, class_offset);
    __ MaybeUnpoisonHeapReference(obj);
  } else {
    __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
    __ MaybeUnpoisonHeapReference(temp);
  }

  switch (instruction->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kArrayCheck: {
      __ cmp(temp, ShifterOperand(cls));
      // Jump to slow path for throwing the exception or doing a
      // more involved array check.
      __ b(slow_path->GetEntryLabel(), NE);
      break;
    }
    case TypeCheckKind::kAbstractClassCheck: {
      // If the class is abstract, we eagerly fetch the super class of the
      // object to avoid doing a comparison we know will fail.
      Label loop;
      __ Bind(&loop);
      __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
      __ MaybeUnpoisonHeapReference(temp);
      // Jump to the slow path to throw the exception.
      __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
      __ cmp(temp, ShifterOperand(cls));
      __ b(&loop, NE);
      break;
    }
    case TypeCheckKind::kClassHierarchyCheck: {
      // Walk over the class hierarchy to find a match.
      Label loop;
      __ Bind(&loop);
      __ cmp(temp, ShifterOperand(cls));
      __ b(&done, EQ);
      __ LoadFromOffset(kLoadWord, temp, temp, super_offset);
      __ MaybeUnpoisonHeapReference(temp);
      // A null super class means we hit the hierarchy root without a match.
      __ CompareAndBranchIfNonZero(temp, &loop);
      // Jump to the slow path to throw the exception.
      __ b(slow_path->GetEntryLabel());
      break;
    }
    case TypeCheckKind::kArrayObjectCheck: {
      // Do an exact check.
      __ cmp(temp, ShifterOperand(cls));
      __ b(&done, EQ);
      // Otherwise, we need to check that the object's class is a non primitive array.
      __ LoadFromOffset(kLoadWord, temp, temp, component_offset);
      __ MaybeUnpoisonHeapReference(temp);
      // A null component type means the class is not an array: throw.
      __ CompareAndBranchIfZero(temp, slow_path->GetEntryLabel());
      __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
      __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
      break;
    }
    case TypeCheckKind::kInterfaceCheck:
    default:
      // Interface checks (and any remaining kind) are delegated to the
      // pCheckCast runtime entrypoint, which throws on failure.
      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                              instruction,
                              instruction->GetDexPc(),
                              nullptr);
      break;
  }
  __ Bind(&done);

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}
4673
4674void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4675  LocationSummary* locations =
4676      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4677  InvokeRuntimeCallingConvention calling_convention;
4678  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
4679}
4680
4681void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
4682  codegen_->InvokeRuntime(instruction->IsEnter()
4683        ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4684      instruction,
4685      instruction->GetDexPc(),
4686      nullptr);
4687}
4688
4689void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
4690void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
4691void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
4692
4693void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4694  LocationSummary* locations =
4695      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4696  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
4697         || instruction->GetResultType() == Primitive::kPrimLong);
4698  locations->SetInAt(0, Location::RequiresRegister());
4699  locations->SetInAt(1, Location::RequiresRegister());
4700  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4701}
4702
4703void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
4704  HandleBitwiseOperation(instruction);
4705}
4706
4707void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
4708  HandleBitwiseOperation(instruction);
4709}
4710
4711void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
4712  HandleBitwiseOperation(instruction);
4713}
4714
4715void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
4716  LocationSummary* locations = instruction->GetLocations();
4717
4718  if (instruction->GetResultType() == Primitive::kPrimInt) {
4719    Register first = locations->InAt(0).AsRegister<Register>();
4720    Register second = locations->InAt(1).AsRegister<Register>();
4721    Register out = locations->Out().AsRegister<Register>();
4722    if (instruction->IsAnd()) {
4723      __ and_(out, first, ShifterOperand(second));
4724    } else if (instruction->IsOr()) {
4725      __ orr(out, first, ShifterOperand(second));
4726    } else {
4727      DCHECK(instruction->IsXor());
4728      __ eor(out, first, ShifterOperand(second));
4729    }
4730  } else {
4731    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
4732    Location first = locations->InAt(0);
4733    Location second = locations->InAt(1);
4734    Location out = locations->Out();
4735    if (instruction->IsAnd()) {
4736      __ and_(out.AsRegisterPairLow<Register>(),
4737              first.AsRegisterPairLow<Register>(),
4738              ShifterOperand(second.AsRegisterPairLow<Register>()));
4739      __ and_(out.AsRegisterPairHigh<Register>(),
4740              first.AsRegisterPairHigh<Register>(),
4741              ShifterOperand(second.AsRegisterPairHigh<Register>()));
4742    } else if (instruction->IsOr()) {
4743      __ orr(out.AsRegisterPairLow<Register>(),
4744             first.AsRegisterPairLow<Register>(),
4745             ShifterOperand(second.AsRegisterPairLow<Register>()));
4746      __ orr(out.AsRegisterPairHigh<Register>(),
4747             first.AsRegisterPairHigh<Register>(),
4748             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4749    } else {
4750      DCHECK(instruction->IsXor());
4751      __ eor(out.AsRegisterPairLow<Register>(),
4752             first.AsRegisterPairLow<Register>(),
4753             ShifterOperand(second.AsRegisterPairLow<Register>()));
4754      __ eor(out.AsRegisterPairHigh<Register>(),
4755             first.AsRegisterPairHigh<Register>(),
4756             ShifterOperand(second.AsRegisterPairHigh<Register>()));
4757    }
4758  }
4759}
4760
// Emits a static or direct call in three phases:
//   1) optionally load the direct code pointer into LR early,
//   2) materialize the callee ArtMethod* (usually in `temp`),
//   3) emit the actual branch / branch-and-link.
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // For better instruction scheduling we load the direct code pointer before the method pointer.
  bool direct_code_loaded = false;
  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // A PC-relative BL is only used within the same dex file; see the
      // matching case in the call-emission switch below.
      if (IsSameDexFile(*invoke->GetTargetMethod().dex_file, GetGraph()->GetDexFile())) {
        break;
      }
      // Calls across dex files are more likely to exceed the available BL range,
      // so use absolute patch by falling through to kDirectCodeFixup.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
      // LR = code address from literal pool with link-time patch.
      __ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
      direct_code_loaded = true;
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // LR = invoke->GetDirectCodePtr();
      __ LoadImmediate(LR, invoke->GetDirectCodePtr());
      direct_code_loaded = true;
      break;
    default:
      // The remaining code pointer locations do not need LR prepared early.
      break;
  }

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      // A recursive call reuses the current method, already present as an input.
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      __ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
      // Method address comes from the literal pool, patched at link time.
      __ LoadLiteral(temp.AsRegister<Register>(),
                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      Register method_reg;
      Register reg = temp.AsRegister<Register>();
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<Register>();
      } else {
        // Intrinsified invokes do not carry the current method as an input;
        // reload it from its stack slot into the temp instead.
        DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
      }
      // temp = current_method->dex_cache_resolved_methods_;
      __ LoadFromOffset(
          kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset(
              kArmPointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadWord, reg, reg, CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  // Finally, emit the call itself.
  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      __ bl(GetFrameEntryLabel());
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      if (!direct_code_loaded) {
        // Record the BL position for the linker to patch the displacement.
        relative_call_patches_.emplace_back(invoke->GetTargetMethod());
        __ Bind(&relative_call_patches_.back().label);
        Label label;
        __ bl(&label);  // Arbitrarily branch to the instruction after BL, override at link time.
        __ Bind(&label);
        break;
      }
      // If we loaded the direct code above, fall through.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // LR prepared above for better instruction scheduling.
      DCHECK(direct_code_loaded);
      // LR()
      __ blx(LR);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // LR = callee_method->entry_point_from_quick_compiled_code_
      __ LoadFromOffset(
          kLoadWord, LR, callee_method.AsRegister<Register>(),
          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
      // LR()
      __ blx(LR);
      break;
  }

  DCHECK(!IsLeafMethod());
}
4862
// Emits a virtual call: load the receiver's class, fetch the target
// ArtMethod* from the class's embedded vtable, then jump through its
// quick-compiled-code entry point.
void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
  Register temp = temp_location.AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
  // Recorded immediately after the class load so a fault on a null
  // receiver is attributed to that instruction.
  MaybeRecordImplicitNullCheck(invoke);
  __ MaybeUnpoisonHeapReference(temp);
  // temp = temp->GetMethodAt(method_offset);
  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kArmWordSize).Int32Value();
  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
  // LR = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
  // LR();
  __ blx(LR);
}
4884
4885void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
4886  DCHECK(linker_patches->empty());
4887  size_t size = method_patches_.size() + call_patches_.size() + relative_call_patches_.size();
4888  linker_patches->reserve(size);
4889  for (const auto& entry : method_patches_) {
4890    const MethodReference& target_method = entry.first;
4891    Literal* literal = entry.second;
4892    DCHECK(literal->GetLabel()->IsBound());
4893    uint32_t literal_offset = literal->GetLabel()->Position();
4894    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
4895                                                       target_method.dex_file,
4896                                                       target_method.dex_method_index));
4897  }
4898  for (const auto& entry : call_patches_) {
4899    const MethodReference& target_method = entry.first;
4900    Literal* literal = entry.second;
4901    DCHECK(literal->GetLabel()->IsBound());
4902    uint32_t literal_offset = literal->GetLabel()->Position();
4903    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
4904                                                     target_method.dex_file,
4905                                                     target_method.dex_method_index));
4906  }
4907  for (const MethodPatchInfo<Label>& info : relative_call_patches_) {
4908    uint32_t literal_offset = info.label.Position();
4909    linker_patches->push_back(LinkerPatch::RelativeCodePatch(literal_offset,
4910                                                             info.target_method.dex_file,
4911                                                             info.target_method.dex_method_index));
4912  }
4913}
4914
4915Literal* CodeGeneratorARM::DeduplicateMethodLiteral(MethodReference target_method,
4916                                                    MethodToLiteralMap* map) {
4917  // Look up the literal for target_method.
4918  auto lb = map->lower_bound(target_method);
4919  if (lb != map->end() && !map->key_comp()(target_method, lb->first)) {
4920    return lb->second;
4921  }
4922  // We don't have a literal for this method yet, insert a new one.
4923  Literal* literal = __ NewLiteral<uint32_t>(0u);
4924  map->PutBefore(lb, target_method, literal);
4925  return literal;
4926}
4927
4928Literal* CodeGeneratorARM::DeduplicateMethodAddressLiteral(MethodReference target_method) {
4929  return DeduplicateMethodLiteral(target_method, &method_patches_);
4930}
4931
4932Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_method) {
4933  return DeduplicateMethodLiteral(target_method, &call_patches_);
4934}
4935
4936void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
4937  // Nothing to do, this should be removed during prepare for register allocator.
4938  UNUSED(instruction);
4939  LOG(FATAL) << "Unreachable";
4940}
4941
4942void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
4943  // Nothing to do, this should be removed during prepare for register allocator.
4944  UNUSED(instruction);
4945  LOG(FATAL) << "Unreachable";
4946}
4947
4948void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
4949  DCHECK(codegen_->IsBaseline());
4950  LocationSummary* locations =
4951      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4952  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
4953}
4954
4955void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
4956  DCHECK(codegen_->IsBaseline());
4957  // Will be generated at use site.
4958}
4959
4960// Simple implementation of packed switch - generate cascaded compare/jumps.
4961void LocationsBuilderARM::VisitPackedSwitch(HPackedSwitch* switch_instr) {
4962  LocationSummary* locations =
4963      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
4964  locations->SetInAt(0, Location::RequiresRegister());
4965}
4966
4967void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr) {
4968  int32_t lower_bound = switch_instr->GetStartValue();
4969  int32_t num_entries = switch_instr->GetNumEntries();
4970  LocationSummary* locations = switch_instr->GetLocations();
4971  Register value_reg = locations->InAt(0).AsRegister<Register>();
4972  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
4973
4974  // Create a series of compare/jumps.
4975  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
4976  for (int32_t i = 0; i < num_entries; i++) {
4977    GenerateCompareWithImmediate(value_reg, lower_bound + i);
4978    __ b(codegen_->GetLabelOf(successors.at(i)), EQ);
4979  }
4980
4981  // And the default for any other value.
4982  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
4983    __ b(codegen_->GetLabelOf(default_block));
4984  }
4985}
4986
4987void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) {
4988  if (!trg.IsValid()) {
4989    DCHECK(type == Primitive::kPrimVoid);
4990    return;
4991  }
4992
4993  DCHECK_NE(type, Primitive::kPrimVoid);
4994
4995  Location return_loc = InvokeDexCallingConventionVisitorARM().GetReturnLocation(type);
4996  if (return_loc.Equals(trg)) {
4997    return;
4998  }
4999
5000  // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
5001  //       with the last branch.
5002  if (type == Primitive::kPrimLong) {
5003    HParallelMove parallel_move(GetGraph()->GetArena());
5004    parallel_move.AddMove(return_loc.ToLow(), trg.ToLow(), Primitive::kPrimInt, nullptr);
5005    parallel_move.AddMove(return_loc.ToHigh(), trg.ToHigh(), Primitive::kPrimInt, nullptr);
5006    GetMoveResolver()->EmitNativeCode(&parallel_move);
5007  } else if (type == Primitive::kPrimDouble) {
5008    HParallelMove parallel_move(GetGraph()->GetArena());
5009    parallel_move.AddMove(return_loc.ToLow(), trg.ToLow(), Primitive::kPrimFloat, nullptr);
5010    parallel_move.AddMove(return_loc.ToHigh(), trg.ToHigh(), Primitive::kPrimFloat, nullptr);
5011    GetMoveResolver()->EmitNativeCode(&parallel_move);
5012  } else {
5013    // Let the parallel move resolver take care of all of this.
5014    HParallelMove parallel_move(GetGraph()->GetArena());
5015    parallel_move.AddMove(return_loc, trg, type, nullptr);
5016    GetMoveResolver()->EmitNativeCode(&parallel_move);
5017  }
5018}
5019
5020#undef __
5021#undef QUICK_ENTRY_POINT
5022
5023}  // namespace arm
5024}  // namespace art
5025