int_arm64.cc revision 63999683329612292d534e6be09dbde9480f1250
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
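/*
 * Worked example (values chosen for illustration): with xA = 5 and xB = 7 the
 * cmp leaves "ne"/"lt" set, so csinc produces wC = 1 and csneg flips it to -1;
 * equal inputs leave wC = 0, and xA > xB keeps wC = +1, matching the -1/0/+1
 * result expected of cmp-long.
 */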
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
  case Instruction::SHL_LONG:
  case Instruction::SHL_LONG_2ADDR:
    op = kOpLsl;
    break;
  case Instruction::SHR_LONG:
  case Instruction::SHR_LONG_2ADDR:
    op = kOpAsr;
    break;
  case Instruction::USHR_LONG:
  case Instruction::USHR_LONG_2ADDR:
    op = kOpLsr;
    break;
  default:
    LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

static constexpr bool kUseDeltaEncodingInGenSelect = false;

void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
                             RegStorage rs_dest, int result_reg_class) {
  if (false_val == 0 ||               // 0 is better as first operand.
      true_val == 1 ||                // Potentially Csinc.
      true_val == -1 ||               // Potentially Csinv.
      true_val == false_val + 1) {    // Potentially Csinc.
    ccode = NegateComparison(ccode);
    std::swap(true_val, false_val);
  }
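  // Illustrative values: "cond ? 5 : 0" is normalized to "!cond ? 0 : 5", so the
  // zero becomes the first operand and can be read straight from wzr/xzr, while
  // "cond ? 6 : 5" becomes a single csinc on top of the loaded 5.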

  ArmConditionCode code = ArmConditionEncoding(ccode);

  int opcode;                                      // The opcode.
  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
  RegStorage right_op = RegStorage::InvalidReg();  // The operands.

  bool is_wide = rs_dest.Is64Bit();

  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;

  if (true_val == 0) {
    left_op = zero_reg;
  } else {
    left_op = rs_dest;
    LoadConstantNoClobber(rs_dest, true_val);
  }
  if (false_val == 1) {
    right_op = zero_reg;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -1) {
    right_op = zero_reg;
    opcode = kA64Csinv4rrrc;
  } else if (false_val == true_val + 1) {
    right_op = left_op;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -true_val) {
    right_op = left_op;
    opcode = kA64Csneg4rrrc;
  } else if (false_val == ~true_val) {
    right_op = left_op;
    opcode = kA64Csinv4rrrc;
  } else if (true_val == 0) {
    // left_op is zero_reg.
    right_op = rs_dest;
    LoadConstantNoClobber(rs_dest, false_val);
    opcode = kA64Csel4rrrc;
  } else {
    // Generic case.
    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
    if (is_wide) {
      if (t_reg2.Is32Bit()) {
        t_reg2 = As64BitReg(t_reg2);
      }
    } else {
      if (t_reg2.Is64Bit()) {
        t_reg2 = As32BitReg(t_reg2);
      }
    }

    if (kUseDeltaEncodingInGenSelect) {
      int32_t delta = false_val - true_val;
      uint32_t abs_val = delta < 0 ? -delta : delta;

      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
        // Can encode as immediate to an add.
        right_op = t_reg2;
        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
      }
    }

    // Load as constant.
    if (!right_op.Valid()) {
      LoadConstantNoClobber(t_reg2, false_val);
      right_op = t_reg2;
    }

    opcode = kA64Csel4rrrc;
  }

  DCHECK(left_op.Valid() && right_op.Valid());
  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
      code);
}

void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                    int dest_reg_class) {
  DCHECK(rs_dest.Valid());
  OpRegReg(kOpCmp, left_op, right_op);
  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  RegLocation rl_dest = mir_graph_->GetDest(mir);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  if (mir->ssa_rep->num_uses == 1) {
    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
              rl_dest.ref ? kRefReg : kCoreReg);
    StoreValue(rl_dest, rl_result);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);

    bool is_wide = rl_dest.ref || rl_dest.wide;
    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
    StoreValue(rl_dest, rl_result);
  }
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
      ArmOpcode opcode = kA64Cbz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target, LIR** compare) {
  DCHECK(compare == nullptr);
  // It is possible that temp register is 64-bit. (ArgReg or RefReg)
  // Always compare 32-bit value no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
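// Worked example (illustrative): for lit == 3 the 32-bit magic is 0x55555556,
// roughly 2^32 / 3. smaddl forms the full 64-bit product magic * x; bits
// [63:32] of that product give x / 3 rounded toward minus infinity, and
// subtracting (x >> 31) corrects negative inputs so the quotient is truncated
// toward zero, e.g. x = 7 -> 2 and x = -7 -> -2.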
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();

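  // Where the table supplies logical-immediate encodings (magic64_base and
  // optionally magic64_eor), the 64-bit magic is materialized with orr/eor (or
  // orr/add) in two instructions, which is presumably cheaper than the movz/movk
  // sequence LoadConstantWide may otherwise emit.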
  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
    }

    // Load the magic constant in two instructions.
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = LowestSetBit(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

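  // Signed division by 2^k rounds toward zero by adding (2^k - 1) to negative
  // dividends before the arithmetic shift; the adds below fold that correction
  // in as "x + (sign_bits >>> (nbits - k))". Illustrative case: x = -7, k = 2
  // gives -7 + 3 = -4 and -4 >> 2 = -1, i.e. -7 / 4 truncated toward zero.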
  int shift = EncodeShift(kA64Lsr, nbits - k);
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x): y = x >> 63 (all sign bits), then abs = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  ArmOpcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
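  // At this point the flags come either from the ldaxr/expected comparison
  // (early exit, "ne" on mismatch) or from the stlxr status check ("eq" when the
  // store succeeded), so "eq" means the swap worked. Csinc on "ne" then
  // materializes the boolean result: wzr + 1 = 1 on success, 0 on failure.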
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

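// Multiplies by a constant with exactly two bits set, lit == 2^first_bit + 2^second_bit,
// using a single add-with-shift plus an optional final shift. Illustrative case:
// x * 10 (bits 1 and 3) becomes "add res, x, x, lsl #2" followed by "lsl res, res, #1".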
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here.  We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
        // We conjecture that kISHLD is insufficient.  It is documented
        // to provide LoadLoad | StoreStore ordering.  But if this were used
        // to implement volatile loads, we suspect that the lack of store
        // atomicity on ARM would cause us to allow incorrect results for
        // the canonical IRIW example.  But we're not sure.
        // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
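  // Sbfm with immr = 0 and imms = 31 is the sxtw alias: it sign-extends the low
  // 32 bits of the source into the 64-bit destination.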
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  if (rl_src2.is_const) {
    DCHECK(rl_src2.wide);
    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
      return;
    }
  }

  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
    } else {
      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Associativity.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs or single registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

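  // The byte offset is converted to 8-byte slot units and advanced by two slots
  // per iteration; the wide ldr/ldp (and str/stp) forms used here presumably take
  // their immediate already scaled by the register size, which is also why the
  // pair case checks offset <= 63 (the range of a scaled imm7).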
  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      DCHECK_LE(offset, 63);
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

}  // namespace art