int_arm64.cc revision 147eb41b53729ec8d5c188d1cac90964a51afb8a
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/* This file contains codegen for the Arm64 ISA. */
18
19#include "arm64_lir.h"
20#include "codegen_arm64.h"
21#include "dex/quick/mir_to_lir-inl.h"
22#include "dex/reg_storage_eq.h"
23#include "entrypoints/quick/quick_entrypoints.h"
24#include "mirror/array.h"
25
26namespace art {
27
28LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
29  OpRegReg(kOpCmp, src1, src2);
30  return OpCondBranch(cond, target);
31}
32
33LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
34  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
35  return NULL;
36}
37
38void Arm64Mir2Lir::OpEndIT(LIR* it) {
39  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
40}
41
42/*
43 * 64-bit three-way compare function.
44 *     cmp   xA, xB
45 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
46 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
47 */
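// Illustrative only, not additional generated code: for xA = 5, xB = 9 the sequence above behaves
// as follows:
//   cmp   x5, x9            // 5 - 9 < 0: N set, Z clear
//   csinc w0, wzr, wzr, eq  // eq false -> w0 = wzr + 1 = 1
//   csneg w0, w0, w0, ge    // ge false -> w0 = -w0     = -1
// giving -1 for "less than"; equal inputs give 0 and greater inputs give +1.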
48void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
49                              RegLocation rl_src2) {
50  RegLocation rl_result;
51  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
52  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
53  rl_result = EvalLoc(rl_dest, kCoreReg, true);
54
55  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
56  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
57  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
58          rl_result.reg.GetReg(), kArmCondGe);
59  StoreValue(rl_dest, rl_result);
60}
61
62void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
63                             RegLocation rl_src1, RegLocation rl_shift) {
64  OpKind op = kOpBkpt;
65  switch (opcode) {
66  case Instruction::SHL_LONG:
67  case Instruction::SHL_LONG_2ADDR:
68    op = kOpLsl;
69    break;
70  case Instruction::SHR_LONG:
71  case Instruction::SHR_LONG_2ADDR:
72    op = kOpAsr;
73    break;
74  case Instruction::USHR_LONG:
75  case Instruction::USHR_LONG_2ADDR:
76    op = kOpLsr;
77    break;
78  default:
79    LOG(FATAL) << "Unexpected case: " << opcode;
80  }
81  rl_shift = LoadValue(rl_shift, kCoreReg);
82  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
83  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
84  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
85  StoreValueWide(rl_dest, rl_result);
86}
87
88void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
89  RegLocation rl_result;
90  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
91  RegLocation rl_dest = mir_graph_->GetDest(mir);
92  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
93  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
94
95  rl_src = LoadValue(rl_src, src_reg_class);
96  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
97  OpRegImm(kOpCmp, rl_src.reg, 0);
98
99  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);
100
101  // The kMirOpSelect has two variants, one for constants and one for moves.
102  bool is_wide = rl_dest.ref || rl_dest.wide;
103
104  if (mir->ssa_rep->num_uses == 1) {
105    uint32_t true_val = mir->dalvikInsn.vB;
106    uint32_t false_val = mir->dalvikInsn.vC;
107
108    int opcode;             // The opcode.
109    int left_op, right_op;  // The operands.
110    bool rl_result_evaled = false;
111
112    // Check some simple cases.
113    // TODO: Improve this.
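    // Illustrative summary of the special cases below, not generated code: (cond ? 1 : 0) needs
    // only
    //   csinc wDest, wzr, wzr, <negated cond>
    // and (cond ? 0 : 0xFFFFFFFF) only a csinv; otherwise one or both constants are materialized
    // and csel/csinc/csinv selects between a loaded value and wzr.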
114    int zero_reg = (is_wide ? rs_xzr : rs_wzr).GetReg();
115
116    if ((true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0)) {
117      // Cheap: a single CSINC based on wzr yields 0 or 1 directly.
118      if (true_val == 1) {
119        // Negate.
120        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
121      }
122
123      left_op = right_op = zero_reg;
124      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
125    } else if ((true_val == 0 && false_val == 0xFFFFFFFF) ||
126               (true_val == 0xFFFFFFFF && false_val == 0)) {
127      // Cheap: a single CSINV based on wzr yields 0 or 0xFFFFFFFF directly.
128      if (true_val == 0xFFFFFFFF) {
129        // Negate.
130        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
131      }
132
133      left_op = right_op = zero_reg;
134      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
135    } else if (true_val == 0 || false_val == 0) {
136      // Half cheap: one value is zero, so CSEL against wzr needs only one constant load.
137      rl_result = EvalLoc(rl_dest, result_reg_class, true);
138      rl_result_evaled = true;
139      if (false_val == 0) {
140        // Negate.
141        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
142      }
143      LoadConstantNoClobber(rl_result.reg, true_val == 0 ? false_val : true_val);
144      left_op = zero_reg;
145      right_op = rl_result.reg.GetReg();
146      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
147    } else if (true_val == 1 || false_val == 1) {
148      // Half cheap: one value is one, so CSINC from wzr supplies it and only the other is loaded.
149      rl_result = EvalLoc(rl_dest, result_reg_class, true);
150      rl_result_evaled = true;
151      if (true_val == 1) {
152        // Negate.
153        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
154      }
155      LoadConstantNoClobber(rl_result.reg, true_val == 1 ? false_val : true_val);
156      left_op = rl_result.reg.GetReg();
157      right_op = zero_reg;
158      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
159    } else if (true_val == 0xFFFFFFFF || false_val == 0xFFFFFFFF) {
160      // Half cheap: one value is 0xFFFFFFFF, so CSINV from wzr supplies it and only the other is loaded.
161      rl_result = EvalLoc(rl_dest, result_reg_class, true);
162      rl_result_evaled = true;
163      if (true_val == 0xFFFFFFFF) {
164        // Negate.
165        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
166      }
167      LoadConstantNoClobber(rl_result.reg, true_val == 0xFFFFFFFF ? false_val : true_val);
168      left_op = rl_result.reg.GetReg();
169      right_op = zero_reg;
170      opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
171    } else if ((true_val + 1 == false_val) || (false_val + 1 == true_val)) {
172      // The two values differ by one: load the smaller into rl_result and use CSINC on it.
173      if (false_val + 1 == true_val) {
174        // Negate.
175        code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
176        true_val = false_val;
177      }
178
179      rl_result = EvalLoc(rl_dest, result_reg_class, true);
180      rl_result_evaled = true;
181      LoadConstantNoClobber(rl_result.reg, true_val);
182      left_op = right_op = rl_result.reg.GetReg();
183      opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
184    } else {
185      // General case: load both constants (into rl_result and a temp) and select with CSEL.
186      // TODO: To minimize the constants being loaded, check whether one can be inexpensively
187      //       loaded as n - 1 or ~n.
188      rl_result = EvalLoc(rl_dest, result_reg_class, true);
189      rl_result_evaled = true;
190      LoadConstantNoClobber(rl_result.reg, true_val);
191      RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
192      if (rl_dest.wide) {
193        if (t_reg2.Is32Bit()) {
194          t_reg2 = As64BitReg(t_reg2);
195        }
196      }
197      LoadConstantNoClobber(t_reg2, false_val);
198
199      // Use csel.
200      left_op = rl_result.reg.GetReg();
201      right_op = t_reg2.GetReg();
202      opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
203    }
204
205    if (!rl_result_evaled) {
206      rl_result = EvalLoc(rl_dest, result_reg_class, true);
207    }
208
209    NewLIR4(opcode, rl_result.reg.GetReg(), left_op, right_op, code);
210  } else {
211    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
212    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
213
214    rl_true = LoadValue(rl_true, result_reg_class);
215    rl_false = LoadValue(rl_false, result_reg_class);
216    rl_result = EvalLoc(rl_dest, result_reg_class, true);
217
218    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
219    NewLIR4(opcode, rl_result.reg.GetReg(),
220            rl_true.reg.GetReg(), rl_false.reg.GetReg(), code);
221  }
222  StoreValue(rl_dest, rl_result);
223}
224
225void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
226  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
227  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
228  LIR* taken = &block_label_list_[bb->taken];
229  LIR* not_taken = &block_label_list_[bb->fall_through];
230  // Normalize such that if either operand is constant, src2 will be constant.
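  // E.g. a fused compare written as (7 < x) becomes (x > 7), so the constant sits in rl_src2.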
231  ConditionCode ccode = mir->meta.ccode;
232  if (rl_src1.is_const) {
233    std::swap(rl_src1, rl_src2);
234    ccode = FlipComparisonOrder(ccode);
235  }
236
237  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
238
239  if (rl_src2.is_const) {
240    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)
241
242    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
243    // Special handling using cbz & cbnz.
244    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
245      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
246      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
247      return;
248    }
249
250    // Only handle Imm if src2 is not already in a register.
251    rl_src2 = UpdateLocWide(rl_src2);
252    if (rl_src2.location != kLocPhysReg) {
253      OpRegImm64(kOpCmp, rl_src1.reg, val);
254      OpCondBranch(ccode, taken);
255      OpCondBranch(NegateComparison(ccode), not_taken);
256      return;
257    }
258  }
259
260  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
261  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
262  OpCondBranch(ccode, taken);
263  OpCondBranch(NegateComparison(ccode), not_taken);
264}
265
266/*
267 * Generate a register comparison to an immediate and branch.  Caller
268 * is responsible for setting branch target field.
269 */
270LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
271                                  LIR* target) {
272  LIR* branch;
273  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
274  if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
275    ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
276    ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
277    branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
278  } else {
279    OpRegImm(kOpCmp, reg, check_value);
280    branch = NewLIR2(kA64B2ct, arm_cond, 0);
281  }
282  branch->target = target;
283  return branch;
284}
285
286LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
287                                     RegStorage base_reg, int offset, int check_value,
288                                     LIR* target, LIR** compare) {
289  DCHECK(compare == nullptr);
290  // The temp register may be 64-bit (an ArgReg or RefReg).
291  // Always compare the 32-bit value, whatever width temp_reg has.
292  if (temp_reg.Is64Bit()) {
293    temp_reg = As32BitReg(temp_reg);
294  }
295  Load32Disp(base_reg, offset, temp_reg);
296  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
297  return branch;
298}
299
300LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
301  bool dest_is_fp = r_dest.IsFloat();
302  bool src_is_fp = r_src.IsFloat();
303  ArmOpcode opcode = kA64Brk1d;
304  LIR* res;
305
306  if (LIKELY(dest_is_fp == src_is_fp)) {
307    if (LIKELY(!dest_is_fp)) {
308      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
309
310      // Core/core copy.
311      // Copies involving the sp register require a different instruction.
312      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;
313
314      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
315      //   This currently works because the other arguments are set to 0 by default. We should
316      //   rather introduce an alias kA64Mov2RR.
317
318      // Do an x/x copy only if both registers are x.
319      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
320        opcode = WIDE(opcode);
321      }
322    } else {
323      // Float/float copy.
324      bool dest_is_double = r_dest.IsDouble();
325      bool src_is_double = r_src.IsDouble();
326
327      // We do not do float/double or double/float casts here.
328      DCHECK_EQ(dest_is_double, src_is_double);
329
330      // Homogeneous float/float copy.
331      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
332    }
333  } else {
334    // Inhomogeneous register copy.
335    if (dest_is_fp) {
336      if (r_dest.IsDouble()) {
337        opcode = kA64Fmov2Sx;
338      } else {
339        r_src = Check32BitReg(r_src);
340        opcode = kA64Fmov2sw;
341      }
342    } else {
343      if (r_src.IsDouble()) {
344        opcode = kA64Fmov2xS;
345      } else {
346        r_dest = Check32BitReg(r_dest);
347        opcode = kA64Fmov2ws;
348      }
349    }
350  }
351
352  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
353
354  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
355    res->flags.is_nop = true;
356  }
357
358  return res;
359}
360
361void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
362  if (r_dest != r_src) {
363    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
364    AppendLIR(res);
365  }
366}
367
368void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
369  OpRegCopy(r_dest, r_src);
370}
371
372// Table of magic divisors
373struct MagicTable {
374  int magic64_base;
375  int magic64_eor;
376  uint64_t magic64;
377  uint32_t magic32;
378  uint32_t shift;
379  DividePattern pattern;
380};
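// magic64_base and magic64_eor are logical-immediate encodings from which magic64 can be rebuilt
// (ORR, then EOR or +1); -1 means no such encoding exists and LoadConstantWide is used instead.
// magic32 is the 32-bit multiplier, shift the extra arithmetic right shift applied to the high
// half of the product, and pattern selects the sign-correction sequence.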
381
382static const MagicTable magic_table[] = {
383  {   0,      0,                  0,          0, 0, DivideNone},  // 0
384  {   0,      0,                  0,          0, 0, DivideNone},  // 1
385  {   0,      0,                  0,          0, 0, DivideNone},  // 2
386  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
387  {   0,      0,                  0,          0, 0, DivideNone},  // 4
388  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
389  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
390  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
391  {   0,      0,                  0,          0, 0, DivideNone},  // 8
392  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
393  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
394  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
395  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
396  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
397  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
398  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
399};
400
401// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
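// Illustrative only, not generated code: for lit == 3 the Divide3 pattern below amounts to
//   int64_t p = (int64_t)x * 0x55555556;        // smaddl with xzr
//   int32_t q = (int32_t)(p >> 32) - (x >> 31); // lsr #32, then sub with asr #31 correction
// e.g. x = 7 gives p >> 32 == 2 and a zero correction, so q == 2 == 7 / 3.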
402bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
403                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
404  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
405    return false;
406  }
407  DividePattern pattern = magic_table[lit].pattern;
408  if (pattern == DivideNone) {
409    return false;
410  }
411  // Tuning: add rem patterns
412  if (!is_div) {
413    return false;
414  }
415
416  RegStorage r_magic = AllocTemp();
417  LoadConstant(r_magic, magic_table[lit].magic32);
418  rl_src = LoadValue(rl_src, kCoreReg);
419  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
420  RegStorage r_long_mul = AllocTemp();
421  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
422          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
423  switch (pattern) {
424    case Divide3:
425      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
426      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
427      break;
428    case Divide5:
429      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
430                  32 + magic_table[lit].shift);
431      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
432      break;
433    case Divide7:
434      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
435                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
436      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
437      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
438      break;
439    default:
440      LOG(FATAL) << "Unexpected pattern: " << pattern;
441  }
442  StoreValue(rl_dest, rl_result);
443  return true;
444}
445
446bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
447                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
448  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
449    return false;
450  }
451  DividePattern pattern = magic_table[lit].pattern;
452  if (pattern == DivideNone) {
453    return false;
454  }
455  // Tuning: add rem patterns
456  if (!is_div) {
457    return false;
458  }
459
460  RegStorage r_magic = AllocTempWide();
461  rl_src = LoadValueWide(rl_src, kCoreReg);
462  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
463  RegStorage r_long_mul = AllocTempWide();
464
465  if (magic_table[lit].magic64_base >= 0) {
466    // Check that the entry in the table is correct.
467    if (kIsDebugBuild) {
468      uint64_t reconstructed_imm;
469      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
470      if (magic_table[lit].magic64_eor >= 0) {
471        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
472        reconstructed_imm = base ^ eor;
473      } else {
474        reconstructed_imm = base + 1;
475      }
476      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
477    }
478
479    // Load the magic constant in two instructions.
480    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
481    if (magic_table[lit].magic64_eor >= 0) {
482      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
483              magic_table[lit].magic64_eor);
484    } else {
485      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
486    }
487  } else {
488    LoadConstantWide(r_magic, magic_table[lit].magic64);
489  }
490
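  // smulh yields the high 64 bits of the signed 128-bit product, i.e. (rl_src * magic) >> 64.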
491  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
492  switch (pattern) {
493    case Divide3:
494      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
495      break;
496    case Divide5:
497      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
498      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
499      break;
500    case Divide7:
501      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
502      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
503      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
504      break;
505    default:
506      LOG(FATAL) << "Unexpected pattern: " << pattern;
507  }
508  StoreValueWide(rl_dest, rl_result);
509  return true;
510}
511
512// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
513// and store the result in 'rl_dest'.
514bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
515                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
516  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
517}
518
519// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
520// and store the result in 'rl_dest'.
521bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
522                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
523  const bool is_64bit = rl_dest.wide;
524  const int nbits = (is_64bit) ? 64 : 32;
525
526  if (lit < 2) {
527    return false;
528  }
529  if (!IsPowerOfTwo(lit)) {
530    if (is_64bit) {
531      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
532    } else {
533      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
534    }
535  }
536  int k = LowestSetBit(lit);
537  if (k >= nbits - 2) {
538    // Avoid special cases.
539    return false;
540  }
541
542  RegLocation rl_result;
543  RegStorage t_reg;
544  if (is_64bit) {
545    rl_src = LoadValueWide(rl_src, kCoreReg);
546    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
547    t_reg = AllocTempWide();
548  } else {
549    rl_src = LoadValue(rl_src, kCoreReg);
550    rl_result = EvalLoc(rl_dest, kCoreReg, true);
551    t_reg = AllocTemp();
552  }
553
554  int shift = EncodeShift(kA64Lsr, nbits - k);
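  // Signed division by 2^k must round toward zero, so 2^k - 1 is added to negative dividends
  // before the arithmetic shift; the addend is derived from the sign bits via an unsigned shift
  // by nbits - k. Illustrative only: x = -7, k = 1 gives (-7 + 1) >> 1 = -3, matching -7 / 2.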
555  if (is_div) {
556    if (lit == 2) {
557      // Division by 2 is by far the most common division by constant.
558      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
559      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
560    } else {
561      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
562      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
563      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
564    }
565  } else {
566    if (lit == 2) {
567      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
568      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
569      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
570    } else {
571      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
572      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
573      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
574      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
575      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
576    }
577  }
578
579  if (is_64bit) {
580    StoreValueWide(rl_dest, rl_result);
581  } else {
582    StoreValue(rl_dest, rl_result);
583  }
584  return true;
585}
586
587bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
588  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
589  return false;
590}
591
592RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
593  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
594  return rl_dest;
595}
596
597RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
598  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
599
600  // Put the literal in a temp.
601  RegStorage lit_temp = AllocTemp();
602  LoadConstant(lit_temp, lit);
603  // Use the generic case for div/rem with arg2 in a register.
604  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
605  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
606  FreeTemp(lit_temp);
607
608  return rl_result;
609}
610
611RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
612                                    RegLocation rl_src2, bool is_div, bool check_zero) {
613  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
614  return rl_dest;
615}
616
617RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
618                                    bool is_div) {
619  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());
620
621  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
622  if (is_div) {
623    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
624  } else {
625    // temp = r_src1 / r_src2
626    // dest = r_src1 - temp * r_src2
627    RegStorage temp;
628    ArmOpcode wide;
629    if (rl_result.reg.Is64Bit()) {
630      temp = AllocTempWide();
631      wide = WIDE(0);
632    } else {
633      temp = AllocTemp();
634      wide = UNWIDE(0);
635    }
636    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
637    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
638            r_src1.GetReg(), r_src2.GetReg());
639    FreeTemp(temp);
640  }
641  return rl_result;
642}
643
644bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
645  RegLocation rl_src = info->args[0];
646  rl_src = LoadValueWide(rl_src, kCoreReg);
647  RegLocation rl_dest = InlineTargetWide(info);
648  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
649  RegStorage sign_reg = AllocTempWide();
650  // abs(x) = y<=x>>63, (x+y)^y.
651  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
652  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
653  OpRegReg(kOpXor, rl_result.reg, sign_reg);
654  StoreValueWide(rl_dest, rl_result);
655  return true;
656}
657
658bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
659  DCHECK_EQ(cu_->instruction_set, kArm64);
660  RegLocation rl_src1 = info->args[0];
661  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
662  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
663  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
664  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
665  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
666  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
667  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
668          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
669  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
670  return true;
671}
672
673bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
674  RegLocation rl_src_address = info->args[0];  // long address
675  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
676  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
677  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
678
679  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
680  if (size == k64) {
681    StoreValueWide(rl_dest, rl_result);
682  } else {
683    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
684    StoreValue(rl_dest, rl_result);
685  }
686  return true;
687}
688
689bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
690  RegLocation rl_src_address = info->args[0];  // long address
691  RegLocation rl_src_value = info->args[2];  // [size] value
692  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
693
694  RegLocation rl_value;
695  if (size == k64) {
696    rl_value = LoadValueWide(rl_src_value, kCoreReg);
697  } else {
698    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
699    rl_value = LoadValue(rl_src_value, kCoreReg);
700  }
701  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
702  return true;
703}
704
705void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
706  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
707}
708
709void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
710  UNIMPLEMENTED(FATAL) << "Should not be used.";
711}
712
713void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
714  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
715}
716
717bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
718  DCHECK_EQ(cu_->instruction_set, kArm64);
719  // Unused - RegLocation rl_src_unsafe = info->args[0];
720  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
721  RegLocation rl_src_offset = info->args[2];  // long low
722  RegLocation rl_src_expected = info->args[4];  // int, long or Object
723  // If is_long, high half is in info->args[5]
724  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
725  // If is_long, high half is in info->args[7]
726  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
727
728  // Load Object and offset
729  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
730  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
731
732  RegLocation rl_new_value;
733  RegLocation rl_expected;
734  if (is_long) {
735    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
736    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
737  } else {
738    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
739    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
740  }
741
742  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
743    // Mark card for object assuming new value is stored.
744    MarkGCCard(rl_new_value.reg, rl_object.reg);
745  }
746
747  RegStorage r_ptr = AllocTempRef();
748  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);
749
750  // Free now unneeded rl_object and rl_offset to give more temps.
751  ClobberSReg(rl_object.s_reg_low);
752  FreeTemp(rl_object.reg);
753  ClobberSReg(rl_offset.s_reg_low);
754  FreeTemp(rl_offset.reg);
755
756  // do {
757  //   tmp = [r_ptr] - expected;
758  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
759  // result = (tmp == 0);
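  // Roughly the sequence emitted below (illustrative; placeholder register names, 32-bit case):
  //   loop: ldaxr  wTmp, [xPtr]
  //         cmp    wTmp, wExpected
  //         b.ne   exit                      // value mismatch, flags stay "ne"
  //         stlxr  wStatus, wNew, [xPtr]
  //         cmp    wStatus, #0
  //         b.ne   loop                      // store lost the exclusive monitor, retry
  //   exit: csinc  wResult, wzr, wzr, ne     // 1 on success, 0 on mismatch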
760
761  RegStorage r_tmp;
762  RegStorage r_tmp_stored;
763  RegStorage rl_new_value_stored = rl_new_value.reg;
764  ArmOpcode wide = UNWIDE(0);
765  if (is_long) {
766    r_tmp_stored = r_tmp = AllocTempWide();
767    wide = WIDE(0);
768  } else if (is_object) {
769    // References use 64-bit registers, but are stored as compressed 32-bit values.
770    // This means r_tmp_stored != r_tmp.
771    r_tmp = AllocTempRef();
772    r_tmp_stored = As32BitReg(r_tmp);
773    rl_new_value_stored = As32BitReg(rl_new_value_stored);
774  } else {
775    r_tmp_stored = r_tmp = AllocTemp();
776  }
777
778  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
779  LIR* loop = NewLIR0(kPseudoTargetLabel);
780  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
781  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
782  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
783  LIR* early_exit = OpCondBranch(kCondNe, NULL);
784  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
785  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
786  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
787  OpCondBranch(kCondNe, loop);
788
789  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
790  early_exit->target = exit_loop;
791
792  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
793  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);
794
795  FreeTemp(r_tmp);  // Now unneeded.
796  FreeTemp(r_ptr);  // Now unneeded.
797
798  StoreValue(rl_dest, rl_result);
799
800  return true;
801}
802
803LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
804  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
805}
806
807LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
808  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
809  return NULL;
810}
811
812LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
813  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
814  return NULL;
815}
816
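// For a multiplier with exactly two bits set, x * lit == (x + (x << (second - first))) << first.
// Illustrative only: lit = 10 (bits 1 and 3) gives x * 10 == (x + (x << 2)) << 1.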
817void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
818                                               RegLocation rl_result, int lit,
819                                               int first_bit, int second_bit) {
820  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
821  if (first_bit != 0) {
822    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
823  }
824}
825
826void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
827  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
828}
829
830// Test suspend flag, return target of taken suspend branch
831LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
832  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
833  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
834}
835
836// Decrement register and branch on condition
837LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
838  // Combine sub & test using sub setflags encoding here.  We need to make sure a
839  // subtract form that sets carry is used, so generate explicitly.
840  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
841  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
842  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
843  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
844  return OpCondBranch(c_code, target);
845}
846
847bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
848#if ANDROID_SMP != 0
849  // Start by treating the last LIR as the barrier; if it does not suffice, generate a new one.
850  LIR* barrier = last_lir_insn_;
851
852  int dmb_flavor;
853  // TODO: revisit Arm barrier kinds
854  switch (barrier_kind) {
855    case kAnyStore: dmb_flavor = kISH; break;
856    case kLoadAny: dmb_flavor = kISH; break;
857        // We conjecture that kISHLD is insufficient.  It is documented
858        // to provide LoadLoad | StoreStore ordering.  But if this were used
859        // to implement volatile loads, we suspect that the lack of store
860        // atomicity on ARM would cause us to allow incorrect results for
861        // the canonical IRIW example.  But we're not sure.
862        // We should be using acquire loads instead.
863    case kStoreStore: dmb_flavor = kISHST; break;
864    case kAnyAny: dmb_flavor = kISH; break;
865    default:
866      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
867      dmb_flavor = kSY;  // quiet gcc.
868      break;
869  }
870
871  bool ret = false;
872
873  // If the same barrier already exists, don't generate another.
874  if (barrier == nullptr
875      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
876    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
877    ret = true;
878  }
879
880  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
881  DCHECK(!barrier->flags.use_def_invalid);
882  barrier->u.m.def_mask = &kEncodeAll;
883  return ret;
884#else
885  return false;
886#endif
887}
888
889void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
890  RegLocation rl_result;
891
892  rl_src = LoadValue(rl_src, kCoreReg);
893  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
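  // SBFM xd, xn, #0, #31 is the SXTW alias: it sign-extends the low 32 bits into the 64-bit result.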
894  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
895  StoreValueWide(rl_dest, rl_result);
896}
897
898void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
899                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
900  if (rl_src2.is_const) {
901    DCHECK(rl_src2.wide);
902    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
903    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
904      return;
905    }
906  }
907
908  RegLocation rl_result;
909  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
910  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
911  GenDivZeroCheck(rl_src2.reg);
912  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
913  StoreValueWide(rl_dest, rl_result);
914}
915
916void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
917                             RegLocation rl_src2) {
918  RegLocation rl_result;
919
920  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
921  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
922  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
923  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
924  StoreValueWide(rl_dest, rl_result);
925}
926
927void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
928  RegLocation rl_result;
929
930  rl_src = LoadValueWide(rl_src, kCoreReg);
931  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
932  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
933  StoreValueWide(rl_dest, rl_result);
934}
935
936void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
937  RegLocation rl_result;
938
939  rl_src = LoadValueWide(rl_src, kCoreReg);
940  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
941  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
942  StoreValueWide(rl_dest, rl_result);
943}
944
945void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
946                              RegLocation rl_src1, RegLocation rl_src2) {
947  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
948}
949
950void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
951                              RegLocation rl_src2) {
952  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
953}
954
955void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
956                            RegLocation rl_src2) {
957  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
958}
959
960void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
961                            RegLocation rl_src2) {
962  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
963}
964
965void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
966                           RegLocation rl_src2) {
967  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
968}
969
970void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
971                            RegLocation rl_src2) {
972  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
973}
974
975/*
976 * Generate array load
977 */
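// Wide, FP and constant-index accesses use a plain register + displacement load; everything else
// rebases the pointer at the data offset and uses a register-offset (indexed) load.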
978void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
979                             RegLocation rl_index, RegLocation rl_dest, int scale) {
980  RegisterClass reg_class = RegClassBySize(size);
981  int len_offset = mirror::Array::LengthOffset().Int32Value();
982  int data_offset;
983  RegLocation rl_result;
984  bool constant_index = rl_index.is_const;
985  rl_array = LoadValue(rl_array, kRefReg);
986  if (!constant_index) {
987    rl_index = LoadValue(rl_index, kCoreReg);
988  }
989
990  if (rl_dest.wide) {
991    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
992  } else {
993    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
994  }
995
996  // If index is constant, just fold it into the data offset
997  if (constant_index) {
998    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
999  }
1000
1001  /* null object? */
1002  GenNullCheck(rl_array.reg, opt_flags);
1003
1004  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
1005  RegStorage reg_len;
1006  if (needs_range_check) {
1007    reg_len = AllocTemp();
1008    /* Get len */
1009    Load32Disp(rl_array.reg, len_offset, reg_len);
1010    MarkPossibleNullPointerException(opt_flags);
1011  } else {
1012    ForceImplicitNullCheck(rl_array.reg, opt_flags);
1013  }
1014  if (rl_dest.wide || rl_dest.fp || constant_index) {
1015    RegStorage reg_ptr;
1016    if (constant_index) {
1017      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
1018    } else {
1019      // No special indexed operation, lea + load w/ displacement
1020      reg_ptr = AllocTempRef();
1021      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
1022                       EncodeShift(kA64Lsl, scale));
1023      FreeTemp(rl_index.reg);
1024    }
1025    rl_result = EvalLoc(rl_dest, reg_class, true);
1026
1027    if (needs_range_check) {
1028      if (constant_index) {
1029        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
1030      } else {
1031        GenArrayBoundsCheck(rl_index.reg, reg_len);
1032      }
1033      FreeTemp(reg_len);
1034    }
1035    if (rl_result.ref) {
1036      LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
1037    } else {
1038      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
1039    }
1040    MarkPossibleNullPointerException(opt_flags);
1041    if (!constant_index) {
1042      FreeTemp(reg_ptr);
1043    }
1044    if (rl_dest.wide) {
1045      StoreValueWide(rl_dest, rl_result);
1046    } else {
1047      StoreValue(rl_dest, rl_result);
1048    }
1049  } else {
1050    // Offset base, then use indexed load
1051    RegStorage reg_ptr = AllocTempRef();
1052    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
1053    FreeTemp(rl_array.reg);
1054    rl_result = EvalLoc(rl_dest, reg_class, true);
1055
1056    if (needs_range_check) {
1057      GenArrayBoundsCheck(rl_index.reg, reg_len);
1058      FreeTemp(reg_len);
1059    }
1060    if (rl_result.ref) {
1061      LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
1062    } else {
1063      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
1064    }
1065    MarkPossibleNullPointerException(opt_flags);
1066    FreeTemp(reg_ptr);
1067    StoreValue(rl_dest, rl_result);
1068  }
1069}
1070
1071/*
1072 * Generate array store
1073 *
1074 */
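// Mirrors GenArrayGet: wide, FP and constant-index stores go through a displacement store, the
// rest through a register-offset (indexed) store; card marking happens after the store.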
1075void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
1076                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
1077  RegisterClass reg_class = RegClassBySize(size);
1078  int len_offset = mirror::Array::LengthOffset().Int32Value();
1079  bool constant_index = rl_index.is_const;
1080
1081  int data_offset;
1082  if (size == k64 || size == kDouble) {
1083    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
1084  } else {
1085    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
1086  }
1087
1088  // If index is constant, just fold it into the data offset.
1089  if (constant_index) {
1090    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
1091  }
1092
1093  rl_array = LoadValue(rl_array, kRefReg);
1094  if (!constant_index) {
1095    rl_index = LoadValue(rl_index, kCoreReg);
1096  }
1097
1098  RegStorage reg_ptr;
1099  bool allocated_reg_ptr_temp = false;
1100  if (constant_index) {
1101    reg_ptr = rl_array.reg;
1102  } else if (IsTemp(rl_array.reg) && !card_mark) {
1103    Clobber(rl_array.reg);
1104    reg_ptr = rl_array.reg;
1105  } else {
1106    allocated_reg_ptr_temp = true;
1107    reg_ptr = AllocTempRef();
1108  }
1109
1110  /* null object? */
1111  GenNullCheck(rl_array.reg, opt_flags);
1112
1113  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
1114  RegStorage reg_len;
1115  if (needs_range_check) {
1116    reg_len = AllocTemp();
1117    // NOTE: max live temps(4) here.
1118    /* Get len */
1119    Load32Disp(rl_array.reg, len_offset, reg_len);
1120    MarkPossibleNullPointerException(opt_flags);
1121  } else {
1122    ForceImplicitNullCheck(rl_array.reg, opt_flags);
1123  }
1124  /* at this point, reg_ptr points to array, 2 live temps */
1125  if (rl_src.wide || rl_src.fp || constant_index) {
1126    if (rl_src.wide) {
1127      rl_src = LoadValueWide(rl_src, reg_class);
1128    } else {
1129      rl_src = LoadValue(rl_src, reg_class);
1130    }
1131    if (!constant_index) {
1132      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
1133                       EncodeShift(kA64Lsl, scale));
1134    }
1135    if (needs_range_check) {
1136      if (constant_index) {
1137        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
1138      } else {
1139        GenArrayBoundsCheck(rl_index.reg, reg_len);
1140      }
1141      FreeTemp(reg_len);
1142    }
1143    if (rl_src.ref) {
1144      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
1145    } else {
1146      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
1147    }
1148    MarkPossibleNullPointerException(opt_flags);
1149  } else {
1150    /* reg_ptr -> array data */
1151    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
1152    rl_src = LoadValue(rl_src, reg_class);
1153    if (needs_range_check) {
1154      GenArrayBoundsCheck(rl_index.reg, reg_len);
1155      FreeTemp(reg_len);
1156    }
1157    if (rl_src.ref) {
1158      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
1159    } else {
1160      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
1161    }
1162    MarkPossibleNullPointerException(opt_flags);
1163  }
1164  if (allocated_reg_ptr_temp) {
1165    FreeTemp(reg_ptr);
1166  }
1167  if (card_mark) {
1168    MarkGCCard(rl_src.reg, rl_array.reg);
1169  }
1170}
1171
1172void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
1173                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
1174  OpKind op = kOpBkpt;
1175  // Per spec, we only care about low 6 bits of shift amount.
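  // E.g. a requested shift of 67 behaves exactly like a shift of 3.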
1176  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
1177  rl_src = LoadValueWide(rl_src, kCoreReg);
1178  if (shift_amount == 0) {
1179    StoreValueWide(rl_dest, rl_src);
1180    return;
1181  }
1182
1183  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
1184  switch (opcode) {
1185    case Instruction::SHL_LONG:
1186    case Instruction::SHL_LONG_2ADDR:
1187      op = kOpLsl;
1188      break;
1189    case Instruction::SHR_LONG:
1190    case Instruction::SHR_LONG_2ADDR:
1191      op = kOpAsr;
1192      break;
1193    case Instruction::USHR_LONG:
1194    case Instruction::USHR_LONG_2ADDR:
1195      op = kOpLsr;
1196      break;
1197    default:
1198      LOG(FATAL) << "Unexpected case";
1199  }
1200  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
1201  StoreValueWide(rl_dest, rl_result);
1202}
1203
1204void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
1205                                     RegLocation rl_src1, RegLocation rl_src2) {
1206  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
1207    if (!rl_src2.is_const) {
1208      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
1209    }
1210  } else {
1211    // Commutativity: if only rl_src1 is constant, swap so the constant ends up in rl_src2.
1212    if (!rl_src2.is_const) {
1213      DCHECK(rl_src1.is_const);
1214      std::swap(rl_src1, rl_src2);
1215    }
1216  }
1217  DCHECK(rl_src2.is_const);
1218
1219  OpKind op = kOpBkpt;
1220  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
1221
1222  switch (opcode) {
1223    case Instruction::ADD_LONG:
1224    case Instruction::ADD_LONG_2ADDR:
1225      op = kOpAdd;
1226      break;
1227    case Instruction::SUB_LONG:
1228    case Instruction::SUB_LONG_2ADDR:
1229      op = kOpSub;
1230      break;
1231    case Instruction::AND_LONG:
1232    case Instruction::AND_LONG_2ADDR:
1233      op = kOpAnd;
1234      break;
1235    case Instruction::OR_LONG:
1236    case Instruction::OR_LONG_2ADDR:
1237      op = kOpOr;
1238      break;
1239    case Instruction::XOR_LONG:
1240    case Instruction::XOR_LONG_2ADDR:
1241      op = kOpXor;
1242      break;
1243    default:
1244      LOG(FATAL) << "Unexpected opcode";
1245  }
1246
1247  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
1248  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
1249  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
1250  StoreValueWide(rl_dest, rl_result);
1251}
1252
1253/**
1254 * @brief Split a register list into pairs or single registers.
1255 *
1256 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
1257 * @code
1258 *   int reg1 = -1, reg2 = -1;
1259 *   while (reg_mask) {
1260 *     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1261 *     if (UNLIKELY(reg2 < 0)) {
1262 *       // Single register in reg1.
1263 *     } else {
1264 *       // Pair in reg1, reg2.
1265 *     }
1266 *   }
1267 * @endcode
1268 */
1269uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
1270  // Find first register.
1271  int first_bit_set = __builtin_ctz(reg_mask) + 1;
1272  int reg = *reg1 + first_bit_set;
1273  reg_mask >>= first_bit_set;
1274
1275  if (LIKELY(reg_mask)) {
1276    // Save the first register, find the second and use the pair opcode.
1277    int second_bit_set = __builtin_ctz(reg_mask) + 1;
1278    *reg2 = reg;
1279    reg_mask >>= second_bit_set;
1280    *reg1 = reg + second_bit_set;
1281    return reg_mask;
1282  }
1283
1284  // Use the single opcode, as we just have one register.
1285  *reg1 = reg;
1286  *reg2 = -1;
1287  return reg_mask;
1288}
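// Illustrative only: starting from reg1 = reg2 = -1 as in the usage sketch above, reg_mask = 0b1011
// (registers 0, 1 and 3) yields the pair reg2 = 0, reg1 = 1 on the first call and the single
// register reg1 = 3 on the second.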
1289
1290void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
1291  int reg1 = -1, reg2 = -1;
1292  const int reg_log2_size = 3;
1293
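  // 'offset' is rescaled to 8-byte slots and advances by two slots per pair; a 64-bit LDP/STP
  // encodes a signed 7-bit scaled immediate, hence the DCHECK against 63 in the pair case below.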
1294  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1295    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1296    if (UNLIKELY(reg2 < 0)) {
1297      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
1298    } else {
1299      DCHECK_LE(offset, 63);
1300      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1301              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
1302    }
1303  }
1304}
1305
1306void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
1307  int reg1 = -1, reg2 = -1;
1308  const int reg_log2_size = 3;
1309
1310  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1311    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1312    if (UNLIKELY(reg2 < 0)) {
1313      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
1314    } else {
1315      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1316              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
1317    }
1318  }
1319}
1320
1321void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
1322  int reg1 = -1, reg2 = -1;
1323  const int reg_log2_size = 3;
1324
1325  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1326    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1327    if (UNLIKELY(reg2 < 0)) {
1328      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1329    } else {
1330      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1331              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1332    }
1333  }
1334}
1335
1336// TODO(Arm64): consider using ld1 and st1?
1337void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
1338  int reg1 = -1, reg2 = -1;
1339  const int reg_log2_size = 3;
1340
1341  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1342    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1343    if (UNLIKELY(reg2 < 0)) {
1344      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1345    } else {
1346      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1347              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1348    }
1349  }
1350}
1351
1352bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1353  ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
1354  RegLocation rl_src_i = info->args[0];
1355  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1356  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1357  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1358  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
1359  (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
1360  return true;
1361}
1362
1363}  // namespace art
1364