int_arm64.cc revision 39c8a99a3fdd9876980502ab12ed74a27e6be369
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
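/*
 * Net effect: 0 when xA == xB, +1 when xA > xB, -1 when xA < xB, i.e. the usual
 * cmp-long -1/0/+1 contract. For example, with xA < xB the csinc produces 1
 * (not equal) and the csneg then negates it to -1 because ge is false.
 */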
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
  case Instruction::SHL_LONG:
  case Instruction::SHL_LONG_2ADDR:
    op = kOpLsl;
    break;
  case Instruction::SHR_LONG:
  case Instruction::SHR_LONG_2ADDR:
    op = kOpAsr;
    break;
  case Instruction::USHR_LONG:
  case Instruction::USHR_LONG_2ADDR:
    op = kOpLsr;
    break;
  default:
    LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;

  rl_src = LoadValue(rl_src, src_reg_class);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  bool is_wide = rl_dest.ref || rl_dest.wide;

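  // Reminder of the A64 conditional-select family used below:
  //   csel  Rd, Rn, Rm, cond   // Rd = cond ? Rn : Rm
  //   csinc Rd, Rn, Rm, cond   // Rd = cond ? Rn : Rm + 1
  //   csinv Rd, Rn, Rm, cond   // Rd = cond ? Rn : ~Rm
  // With both operands set to wzr/xzr these yield 0/1 (csinc) and 0/-1 (csinv)
  // selects without loading any constant.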
  if (mir->ssa_rep->num_uses == 1) {
    uint32_t true_val = mir->dalvikInsn.vB;
    uint32_t false_val = mir->dalvikInsn.vC;

    int opcode;             // The opcode.
    int left_op, right_op;  // The operands.
    bool rl_result_evaled = false;

    // Check some simple cases.
    // TODO: Improve this.
    int zero_reg = (is_wide ? rs_xzr : rs_wzr).GetReg();

    if ((true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0)) {
      // CSInc cheap based on wzr.
      if (true_val == 1) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }

      left_op = right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
    } else if ((true_val == 0 && false_val == 0xFFFFFFFF) ||
               (true_val == 0xFFFFFFFF && false_val == 0)) {
      // CSInv cheap based on wzr.
      if (true_val == 0xFFFFFFFF) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }

      left_op = right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
    } else if (true_val == 0 || false_val == 0) {
      // Csel half cheap based on wzr.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      if (false_val == 0) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }
      LoadConstantNoClobber(rl_result.reg, true_val == 0 ? false_val : true_val);
      left_op = zero_reg;
      right_op = rl_result.reg.GetReg();
      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    } else if (true_val == 1 || false_val == 1) {
      // CSInc half cheap based on wzr.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      if (true_val == 1) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }
      LoadConstantNoClobber(rl_result.reg, true_val == 1 ? false_val : true_val);
      left_op = rl_result.reg.GetReg();
      right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
    } else if (true_val == 0xFFFFFFFF || false_val == 0xFFFFFFFF) {
      // CSInv half cheap based on wzr.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      if (true_val == 0xFFFFFFFF) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
      }
      LoadConstantNoClobber(rl_result.reg, true_val == 0xFFFFFFFF ? false_val : true_val);
      left_op = rl_result.reg.GetReg();
      right_op = zero_reg;
      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
    } else if ((true_val + 1 == false_val) || (false_val + 1 == true_val)) {
      // Load a constant and use CSinc. Use rl_result.
      if (false_val + 1 == true_val) {
        // Negate.
        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
        true_val = false_val;
      }

      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      LoadConstantNoClobber(rl_result.reg, true_val);
      left_op = right_op = rl_result.reg.GetReg();
      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
    } else {
      // Csel. The rest. Use rl_result and a temp.
      // TODO: To minimize the constants being loaded, check whether one can be inexpensively
      //       loaded as n - 1 or ~n.
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
      rl_result_evaled = true;
      LoadConstantNoClobber(rl_result.reg, true_val);
      RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
      if (rl_dest.wide) {
        if (t_reg2.Is32Bit()) {
          t_reg2 = As64BitReg(t_reg2);
        }
      }
      LoadConstantNoClobber(t_reg2, false_val);

      // Use csel.
      left_op = rl_result.reg.GetReg();
      right_op = t_reg2.GetReg();
      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    }

    if (!rl_result_evaled) {
      rl_result = EvalLoc(rl_dest, result_reg_class, true);
    }

    NewLIR4(opcode, rl_result.reg.GetReg(), left_op, right_op, code);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    rl_result = EvalLoc(rl_dest, result_reg_class, true);

    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), code);
  }
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
    ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
    ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
    branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target) {
  // The temp register may be 64-bit (an ArgReg or RefReg).
  // Always compare a 32-bit value, regardless of what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
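// Rough sketch of the sequence emitted below for lit == 3 (magic 0x55555556 == ceil(2^32 / 3)):
//   smaddl  x_tmp, w_magic, w_src, xzr    // 64-bit product magic * src
//   lsr     x_tmp, x_tmp, #32             // high word approximates src / 3
//   sub     w_dst, w_tmp, w_src, asr #31  // correct the quotient for negative src
// e.g. src = 7: 0x55555556 * 7 = 0x2_5555555A, high word 2, and 7 / 3 == 2.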
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
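  // Power-of-two case: the "lsr, 32 - k" of the sign bits below adds (2^k - 1) to
  // negative dividends, so the arithmetic shift right by k rounds toward zero as
  // Dalvik division requires. The remainder path masks with lit - 1 and then
  // subtracts the same bias back out.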
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, EncodeShift(kA64Lsr, 32 - k));
    } else {
      RegStorage t_reg2 = AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, EncodeShift(kA64Lsr, 32 - k));
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                  bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x): y = x >> 63 (arithmetic, so y is 0 or all ones); abs = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;
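  //
  // Ldaxr/Stlxr form a load-acquire/store-release exclusive pair: stlxr writes its
  // status register (0 on success, non-zero if the exclusive monitor was lost), and
  // the cmp/branch below uses that status to retry the loop on a spurious failure.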

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  ArmOpcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here.  We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
        // We conjecture that kISHLD is insufficient.  It is documented
        // to provide LoadLoad | StoreStore ordering.  But if this were used
        // to implement volatile loads, we suspect that the lack of store
        // atomicity on ARM would cause us to allow incorrect results for
        // the canonical IRIW example.  But we're not sure.
        // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
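  // sbfm xDst, xSrc, #0, #31 is the sxtw alias: sign-extend the low 32 bits to 64 bits.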
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
    } else {
      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Associativity.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs or single registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
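/*
 * Note: register numbers are reconstructed relative to the previous value of *reg1
 * (the mask is shifted as it is consumed), so callers must initialize reg1 to -1
 * before the first call, as the spill/unspill helpers below do.
 */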
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;
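  // Offsets are counted in 8-byte slots (offset >> reg_log2_size below); the ldr/ldp
  // forms used here take scaled immediates, and the DCHECK_LE(offset, 63) guards
  // ldp's limited scaled-immediate range.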

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      DCHECK_LE(offset, 63);
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

}  // namespace art