int_arm64.cc revision 8dea81ca9c0201ceaa88086b927a5838a06a3e69
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
  case Instruction::SHL_LONG:
  case Instruction::SHL_LONG_2ADDR:
    op = kOpLsl;
    break;
  case Instruction::SHR_LONG:
  case Instruction::SHR_LONG_2ADDR:
    op = kOpAsr;
    break;
  case Instruction::USHR_LONG:
  case Instruction::USHR_LONG_2ADDR:
    op = kOpLsr;
    break;
  default:
    LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValueWide(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_shift.reg);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
  rl_src = LoadValue(rl_src, src_reg_class);
  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);

  RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
  RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
  rl_true = LoadValue(rl_true, result_reg_class);
  rl_false = LoadValue(rl_false, result_reg_class);
  rl_result = EvalLoc(rl_dest, result_reg_class, true);
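  // Compare the selector against zero, then use a conditional select (csel) to pick the
  // true operand when the fused condition holds and the false operand otherwise.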
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_true.reg.GetReg(),
          rl_false.reg.GetReg(), code);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  if (rl_src2.is_const) {
    rl_src2 = UpdateLocWide(rl_src2);
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    // Only handle Imm if src2 is not already in a register.
    } else if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
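  // An equality/inequality test against zero maps directly onto cbz/cbnz, avoiding the
  // separate compare; all other cases fall back to cmp followed by a conditional branch.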
  if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
    ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
    ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
    branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used here as a 2-arg instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // Core/core copy: do an x/x copy only if both registers are 64-bit.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        DCHECK(r_src.IsSingle());
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        DCHECK(r_dest.Is32Bit());
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

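// Note: a wide value is expected to live in a single 64-bit register here, so a wide copy
// reduces to an ordinary register copy.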
void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  // TODO(Arm64): fix this for Arm64. Note: it may be worth revisiting the magic table.
  //   It should be possible to subtract one from all its entries and use smaddl
  //   to counteract this. The advantage is that the constants would then be easier to
  //   encode as logical immediates (0x55555555 rather than 0x55555556).
  UNIMPLEMENTED(FATAL);

  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_hi = AllocTemp();
  RegStorage r_lo = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, r_lo.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_hi, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi, EncodeShift(kA64Asr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.reg, r_lo, r_hi, EncodeShift(kA64Asr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                      RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                  bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(FATAL);

  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  // OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
  OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
  GenBarrier();
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == k64) {
    // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
    if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
      LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
      LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
    } else {
      LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
      LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  if (size == k64) {
    // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
    StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
  }
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // We have only 5 temporary registers available and actually only 4 if the InlineTarget
  // above locked one of the temps. For a straightforward CAS64 we need 7 registers:
  // r_ptr (1), new_value (2), expected(2) and ldrexd result (2). If neither expected nor
  // new_value is in a non-temp core register we shall reload them in the ldrex/strex loop
  // into the same temps, reducing the number of required temps down to 5. We shall work
  // around the potentially locked temp by using LR for r_ptr, unconditionally.
  // TODO: Pass information about the need for more temps to the stack frame generation
  // code so that we can rely on being able to allocate enough temps.
  DCHECK(!GetRegInfo(rs_rA64_LR)->IsTemp());
  MarkTemp(rs_rA64_LR);
  FreeTemp(rs_rA64_LR);
  LockTemp(rs_rA64_LR);
  bool load_early = true;
  if (is_long) {
    RegStorage expected_reg = rl_src_expected.reg.IsPair() ? rl_src_expected.reg.GetLow() :
        rl_src_expected.reg;
    RegStorage new_val_reg = rl_src_new_value.reg.IsPair() ? rl_src_new_value.reg.GetLow() :
        rl_src_new_value.reg;
    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !expected_reg.IsFloat();
    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !new_val_reg.IsFloat();
    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);

    if (!expected_is_good_reg && !new_value_is_good_reg) {
      // None of expected/new_value is non-temp reg, need to load both late
      load_early = false;
      // Make sure they are not in the temp regs and the load will not be skipped.
      if (expected_is_core_reg) {
        FlushRegWide(rl_src_expected.reg);
        ClobberSReg(rl_src_expected.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
        rl_src_expected.location = kLocDalvikFrame;
      }
      if (new_value_is_core_reg) {
        FlushRegWide(rl_src_new_value.reg);
        ClobberSReg(rl_src_new_value.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
        rl_src_new_value.location = kLocDalvikFrame;
      }
    }
  }

  // Release store semantics, get the barrier out of the way.  TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_new_value;
  if (!is_long) {
    rl_new_value = LoadValue(rl_src_new_value);
  } else if (load_early) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  RegStorage r_ptr = rs_rA64_LR;
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  RegLocation rl_expected;
  if (!is_long) {
    rl_expected = LoadValue(rl_src_expected);
  } else if (load_early) {
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
    int low_reg = AllocTemp().GetReg();
    int high_reg = AllocTemp().GetReg();
    rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
    rl_expected = rl_new_value;
  }

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);

  if (is_long) {
    RegStorage r_tmp_high = AllocTemp();
    if (!load_early) {
      LoadValueDirectWide(rl_src_expected, rl_expected.reg);
    }
    NewLIR3(kA64Ldxr2rX, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
    if (!load_early) {
      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
    }

    LIR* branch1 = OpCmpImmBranch(kCondNe, r_tmp, 0, NULL);
    LIR* branch2 = OpCmpImmBranch(kCondNe, r_tmp_high, 0, NULL);
    NewLIR4(WIDE(kA64Stxr3wrX) /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(),
            rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
    LIR* target2 = NewLIR0(kPseudoTargetLabel);
    branch1->target = target2;
    branch2->target = target2;
    FreeTemp(r_tmp_high);  // Now unneeded

  } else {
    NewLIR3(kA64Ldxr2rX, r_tmp.GetReg(), r_ptr.GetReg(), 0);
    OpRegReg(kOpSub, r_tmp, rl_expected.reg);
    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
    // OpIT(kCondEq, "T");
    NewLIR4(kA64Stxr3wrX /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
  }

  // Still one conditional left from OpIT(kCondEq, "T") from either branch
  OpRegImm(kOpCmp /* eq */, r_tmp, 1);
  OpCondBranch(kCondEq, target);

  if (!load_early) {
    FreeTemp(rl_expected.reg);  // Now unneeded.
  }

  // result := (tmp1 != 0) ? 0 : 1;
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  // OpIT(kCondUlt, "");
  LoadConstant(rl_result.reg, 0); /* cc */
  FreeTemp(r_tmp);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  // Now, restore lr to its non-temp status.
  Clobber(rs_rA64_LR);
  UnmarkTemp(rs_rA64_LR);
  return true;
}

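// PC-relative literal load; the displacement to the target LIR is expected to be fixed up
// later, during assembly.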
LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

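// The multiplier is expected to have exactly two bits set: (1 << first_bit) + (1 << second_bit).
// Compute src + (src << (second_bit - first_bit)), then shift the sum left by first_bit.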
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  // FIXME: Define rA64_SUSPEND as w19, once we no longer need two copies of the reserved register.
  // Note: the opcode is not marked as wide, so we actually use the 32-bit view of the register.
  NewLIR3(kA64Subs3rRd, rA64_SUSPEND, rA64_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off by using the last LIR as the barrier. If it is not sufficient, we will generate a new one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
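  // Sign-extend the 32-bit source into 64 bits: sbfm xd, xn, #0, #31 is the sxtw alias.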
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  // TODO(Arm64): check this.
  UNIMPLEMENTED(WARNING);

  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  // TODO(Arm64): check this.
  UNIMPLEMENTED(WARNING);

  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }

    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Commutativity: make sure the constant ends up in rl_src2.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs or single registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
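  // On entry, *reg1 holds the last register number returned (-1 on the first call) and reg_mask
  // has already been shifted past it, so register numbers are reconstructed incrementally.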
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

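// Note for the spill/unspill helpers below: offset is pre-scaled from bytes into 8-byte slots
// (reg_log2_size == 3) and advances by two slots per iteration, matching the ldp/stp pairs.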
void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

}  // namespace art