int_arm.cc revision 695d13a82d6dd801aaa57a22a9d4b3f6db0d0fdb
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse.  The condition applies to the first instruction, which
 * is executed if the condition is met.  The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
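// As a worked example (using the standard ARM condition encoding, where EQ is 0b0000):
// OpIT(kCondEq, "E") gives cond_bit = 0 and alt_bit = 1; the single 'E' sets bit 3 of the
// mask and the terminating one bit lands at bit 2, so the emitted IT uses mask 0b1100 -
// an ITE EQ block whose first instruction runs on EQ and whose second runs on NE.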
LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
       (1 << (3 - strlen(guide)));
  return NewLIR2(kThumb2It, code, mask);
}

void ArmMir2Lir::OpEndIT(LIR* it) {
  // TODO: use the 'it' pointer to do some checks with the LIR, for example
  //       we could check that the number of instructions matches the mask
  //       in the IT instruction.
  CHECK(it != nullptr);
  GenBarrier();
}

/*
 * 64-bit 3way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  RegStorage t_reg = AllocTemp();
  LoadConstant(t_reg, -1);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  LIR* branch1 = OpCondBranch(kCondLt, NULL);
  LIR* branch2 = OpCondBranch(kCondGt, NULL);
  OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  LIR* branch3 = OpCondBranch(kCondEq, NULL);

  LIR* it = OpIT(kCondHi, "E");
  NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
  LoadConstant(t_reg, 1);
  OpEndIT(it);

  target2 = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
  rl_temp.reg.SetReg(t_reg.GetReg());
  StoreValue(rl_dest, rl_temp);
  FreeTemp(t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}

void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                          int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  DCHECK_GE(ModifiedImmediate(val_lo), 0);
  DCHECK_GE(ModifiedImmediate(val_hi), 0);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegStorage low_reg = rl_src1.reg.GetLow();
  RegStorage high_reg = rl_src1.reg.GetHigh();

  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
    RegStorage t_reg = AllocTemp();
    NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), low_reg.GetReg(), high_reg.GetReg(), 0);
    FreeTemp(t_reg);
    OpCondBranch(ccode, taken);
    return;
  }

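  // Once the high words compare equal, only the unsigned relation between the low words
  // matters, so each signed condition is rewritten to its unsigned counterpart (e.g.
  // kCondLt becomes kCondUlt) for the final low-word compare below.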
  switch (ccode) {
    case kCondEq:
    case kCondNe:
      OpCmpImmBranch(kCondNe, high_reg, val_hi, (ccode == kCondEq) ? not_taken : taken);
      break;
    case kCondLt:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}

void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  ConditionCode ccode = mir->meta.ccode;
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Change kCondNe to kCondEq for the special cases below.
    if (ccode == kCondNe) {
      ccode = kCondEq;
      std::swap(true_val, false_val);
    }
    bool cheap_false_val = InexpensiveConstantInt(false_val);
    if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      LIR* it = OpIT(true_val == 0 ? kCondNe : kCondUge, "");
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      LIR* it = OpIT(kCondLs, "");
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
      OpRegImm(kOpCmp, rl_src.reg, 0);
      LIR* it = OpIT(ccode, "E");
      LoadConstant(rl_result.reg, true_val);
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else {
      // Unlikely case - could be tuned.
      RegStorage t_reg1 = AllocTemp();
      RegStorage t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.reg, 0);
      LIR* it = OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, t_reg1);
      OpRegCopy(rl_result.reg, t_reg2);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.reg, 0);
    LIR* it = nullptr;
    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
      it = OpIT(NegateComparison(ccode), "");
      OpRegCopy(rl_result.reg, rl_false.reg);
    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
      it = OpIT(ccode, "");
      OpRegCopy(rl_result.reg, rl_true.reg);
    } else {  // Normal - select between the two.
      it = OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, rl_true.reg);
      OpRegCopy(rl_result.reg, rl_false.reg);
    }
    OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
  }
  StoreValue(rl_dest, rl_result);
}

void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg) &&
        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  /*
   * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
   * compare-and-branch if zero is ideal if it will reach.  However, because null checks
   * branch forward to a launch pad, they will frequently not reach - and thus have to
   * be converted to a long form during assembly (which will trigger another assembly
   * pass).  Here we estimate the branch distance for checks, and if large directly
   * generate the long form in an attempt to avoid an extra assembly pass.
   * TODO: consider interspersing launchpads in code following unconditional branches.
   */
  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
  skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
  if (!skip && (ARM_LOWREG(reg.GetReg())) && (check_value == 0) &&
     ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}

LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  LIR* res;
  int opcode;
  // If src or dest is a pair, we'll be using low reg.
  if (r_dest.IsPair()) {
    r_dest = r_dest.GetLow();
  }
  if (r_src.IsPair()) {
    r_src = r_src.GetLow();
  }
  if (ARM_FPREG(r_dest.GetReg()) || ARM_FPREG(r_src.GetReg()))
    return OpFpRegCopy(r_dest, r_src);
  if (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src.GetReg()))
    opcode = kThumbMovRR;
  else if (!ARM_LOWREG(r_dest.GetReg()) && !ARM_LOWREG(r_src.GetReg()))
     opcode = kThumbMovRR_H2H;
  else if (ARM_LOWREG(r_dest.GetReg()))
     opcode = kThumbMovRR_H2L;
  else
     opcode = kThumbMovRR_L2H;
  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
  AppendLIR(res);
  return res;
}

void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  bool dest_fp = ARM_FPREG(r_dest.GetLowReg());
  bool src_fp = ARM_FPREG(r_src.GetLowReg());
  if (dest_fp) {
    if (src_fp) {
      // FIXME: handle 64-bit solo's here.
      OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
                RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
    } else {
      NewLIR3(kThumb2Fmdrr, S2d(r_dest.GetLowReg(), r_dest.GetHighReg()),
              r_src.GetLowReg(), r_src.GetHighReg());
    }
  } else {
    if (src_fp) {
      NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), S2d(r_src.GetLowReg(),
              r_src.GetHighReg()));
    } else {
      // Handle overlap
      if (r_src.GetHighReg() == r_dest.GetLowReg()) {
        DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
        OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
        OpRegCopy(r_dest.GetLow(), r_src.GetLow());
      } else {
        OpRegCopy(r_dest.GetLow(), r_src.GetLow());
        OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
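// Sketch of the Divide3 pattern for a non-negative n: smull leaves the 64-bit product
// 0x55555556 * n in r_hi:r_lo, and its high word already equals n / 3 (e.g. n = 7 gives
// hi32(0x55555556 * 7) = 2); the trailing "sub ..., r_hi, n asr #31" term corrects the
// result toward zero when n is negative.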
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_hi = AllocTemp();
  RegStorage r_lo = AllocTemp();

  // rl_dest and rl_src might overlap.
  // Reuse r_hi to save the div result for the remainder case.
  RegStorage r_div_result = is_div ? rl_result.reg : r_hi;

  NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, r_div_result, r_hi, rl_src.reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }

  if (!is_div) {
    // div_result = src / lit
    // tmp1 = div_result * lit
    // dest = src - tmp1
    RegStorage tmp1 = r_lo;
    EasyMultiplyOp ops[2];

    bool canEasyMultiply = GetEasyMultiplyTwoOps(lit, ops);
    DCHECK_NE(canEasyMultiply, false);

    GenEasyMultiplyTwoOps(tmp1, r_div_result, ops);
    OpRegRegReg(kOpSub, rl_result.reg, rl_src.reg, tmp1);
  }

  StoreValue(rl_dest, rl_result);
  return true;
}

// Try to convert *lit to 1 RegRegRegShift/RegRegShift form.
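// For example, lit == 9 maps to {kOpAdd, 3} (x*9 == x + (x << 3)) and lit == 7 maps to
// {kOpRsub, 3} (x*7 == (x << 3) - x).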
bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
  if (IsPowerOfTwo(lit)) {
    op->op = kOpLsl;
    op->shift = LowestSetBit(lit);
    return true;
  }

  if (IsPowerOfTwo(lit - 1)) {
    op->op = kOpAdd;
    op->shift = LowestSetBit(lit - 1);
    return true;
  }

  if (IsPowerOfTwo(lit + 1)) {
    op->op = kOpRsub;
    op->shift = LowestSetBit(lit + 1);
    return true;
  }

  op->op = kOpInvalid;
  op->shift = 0;
  return false;
}

// Try to convert *lit to 1~2 RegRegRegShift/RegRegShift forms.
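// For example, lit == 10 yields ops[0] = {kOpAdd, 2} (x*5) then ops[1] = {kOpLsl, 1},
// and lit == 21 yields ops[0] = {kOpAdd, 2} then ops[1] = {kOpAdd, 2} (x + ((x*5) << 2)).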
bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
  if (GetEasyMultiplyOp(lit, &ops[0])) {
    ops[1].op = kOpInvalid;
    ops[1].shift = 0;
    return true;
  }

  int lit1 = lit;
  uint32_t shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpLsl;
    ops[1].shift = shift;
    return true;
  }

  lit1 = lit - 1;
  shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpAdd;
    ops[1].shift = shift;
    return true;
  }

  lit1 = lit + 1;
  shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpRsub;
    ops[1].shift = shift;
    return true;
  }

  return false;
}

// Generate instructions to do multiply.
// An additional temporary register is required
// if we need to generate 2 instructions and src/dest overlap.
void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
  // tmp1 = ( src << shift1) + [ src | -src | 0 ]
  // dest = (tmp1 << shift2) + [ src | -src | 0 ]

  RegStorage r_tmp1;
  if (ops[1].op == kOpInvalid) {
    r_tmp1 = r_dest;
  } else if (r_dest.GetReg() != r_src.GetReg()) {
    r_tmp1 = r_dest;
  } else {
    r_tmp1 = AllocTemp();
  }

  switch (ops[0].op) {
    case kOpLsl:
      OpRegRegImm(kOpLsl, r_tmp1, r_src, ops[0].shift);
      break;
    case kOpAdd:
      OpRegRegRegShift(kOpAdd, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
      break;
    case kOpRsub:
      OpRegRegRegShift(kOpRsub, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
      break;
    default:
      DCHECK_EQ(ops[0].op, kOpInvalid);
      break;
  }

  switch (ops[1].op) {
    case kOpInvalid:
      return;
    case kOpLsl:
      OpRegRegImm(kOpLsl, r_dest, r_tmp1, ops[1].shift);
      break;
    case kOpAdd:
      OpRegRegRegShift(kOpAdd, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
      break;
    case kOpRsub:
      OpRegRegRegShift(kOpRsub, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected opcode passed to GenEasyMultiplyTwoOps";
      break;
  }
}

bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  EasyMultiplyOp ops[2];

  if (!GetEasyMultiplyTwoOps(lit, ops)) {
    return false;
  }

  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  GenEasyMultiplyTwoOps(rl_result.reg, rl_src.reg, ops);
  StoreValue(rl_dest, rl_result);
  return true;
}

LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
                                int offset, ThrowKind kind) {
  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
  return NULL;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                      RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
  return rl_dest;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
  return rl_dest;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
                                  bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Simple case, use sdiv instruction.
    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
  } else {
    // Remainder case, use the following code:
    // temp = reg1 / reg2      - integer division
    // temp = temp * reg2
    // dest = reg1 - temp

    RegStorage temp = AllocTemp();
    OpRegRegReg(kOpDiv, temp, reg1, reg2);
    OpRegReg(kOpMul, temp, reg2);
    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
    FreeTemp(temp);
  }

  return rl_result;
}

bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  LIR* it = OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
  OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
  OpEndIT(it);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == k64) {
    // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
    if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
      Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
      Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
    } else {
      Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
      Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  if (size == k64) {
    // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
    StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
  }
  return true;
}

void ArmMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm";
}

void ArmMir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}

bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // We have only 5 temporary registers available and actually only 4 if the InlineTarget
  // above locked one of the temps. For a straightforward CAS64 we need 7 registers:
  // r_ptr (1), new_value (2), expected(2) and ldrexd result (2). If neither expected nor
  // new_value is in a non-temp core register we shall reload them in the ldrex/strex loop
  // into the same temps, reducing the number of required temps down to 5. We shall work
  // around the potentially locked temp by using LR for r_ptr, unconditionally.
  // TODO: Pass information about the need for more temps to the stack frame generation
  // code so that we can rely on being able to allocate enough temps.
  DCHECK(!reg_pool_->core_regs[rARM_LR].is_temp);
  MarkTemp(rARM_LR);
  FreeTemp(rARM_LR);
  LockTemp(rARM_LR);
  bool load_early = true;
  if (is_long) {
    int expected_reg = is_long ? rl_src_expected.reg.GetLowReg() : rl_src_expected.reg.GetReg();
    int new_val_reg = is_long ? rl_src_new_value.reg.GetLowReg() : rl_src_new_value.reg.GetReg();
    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !IsFpReg(expected_reg);
    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !IsFpReg(new_val_reg);
    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);

    if (!expected_is_good_reg && !new_value_is_good_reg) {
      // None of expected/new_value is non-temp reg, need to load both late
      load_early = false;
      // Make sure they are not in the temp regs and the load will not be skipped.
      if (expected_is_core_reg) {
        FlushRegWide(rl_src_expected.reg);
        ClobberSReg(rl_src_expected.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
        rl_src_expected.location = kLocDalvikFrame;
      }
      if (new_value_is_core_reg) {
        FlushRegWide(rl_src_new_value.reg);
        ClobberSReg(rl_src_new_value.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
        rl_src_new_value.location = kLocDalvikFrame;
      }
    }
  }

  // Release store semantics, get the barrier out of the way.  TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_new_value;
  if (!is_long) {
    rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
  } else if (load_early) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  RegStorage r_ptr = rs_rARM_LR;
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg.GetReg());
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg.GetReg());

  RegLocation rl_expected;
  if (!is_long) {
    rl_expected = LoadValue(rl_src_expected, kCoreReg);
  } else if (load_early) {
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
    int low_reg = AllocTemp().GetReg();
    int high_reg = AllocTemp().GetReg();
    rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
    rl_expected = rl_new_value;
  }

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;
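  // strex/strexd writes 0 to r_tmp on success and 1 on failure, so the "cmp r_tmp, #1"
  // in the IT block below (executed only when the loaded value matched the expected one)
  // makes the trailing beq retry on a failed store, while a value mismatch leaves the
  // flags non-equal and falls through.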

  RegStorage r_tmp = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);

  LIR* it = nullptr;
  if (is_long) {
    RegStorage r_tmp_high = AllocTemp();
    if (!load_early) {
      LoadValueDirectWide(rl_src_expected, rl_expected.reg);
    }
    NewLIR3(kThumb2Ldrexd, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
    if (!load_early) {
      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
    }
    // Make sure we use ORR that sets the ccode
    if (ARM_LOWREG(r_tmp.GetReg()) && ARM_LOWREG(r_tmp_high.GetReg())) {
      NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
    } else {
      NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
    }
    FreeTemp(r_tmp_high);  // Now unneeded

    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
    it = OpIT(kCondEq, "T");
    NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(), rl_new_value.reg.GetHighReg(), r_ptr.GetReg());

  } else {
    NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
    OpRegReg(kOpSub, r_tmp, rl_expected.reg);
    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
    it = OpIT(kCondEq, "T");
    NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
  }

  // Still one conditional left from OpIT(kCondEq, "T") from either branch
  OpRegImm(kOpCmp /* eq */, r_tmp, 1);
  OpEndIT(it);

  OpCondBranch(kCondEq, target);

  if (!load_early) {
    FreeTemp(rl_expected.reg);  // Now unneeded.
  }

  // result := (tmp1 != 0) ? 0 : 1;
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  it = OpIT(kCondUlt, "");
  LoadConstant(rl_result.reg, 0); /* cc */
  FreeTemp(r_tmp);  // Now unneeded.
  OpEndIT(it);     // Barrier to terminate OpIT.

  StoreValue(rl_dest, rl_result);

  // Now, restore lr to its non-temp status.
  Clobber(rARM_LR);
  UnmarkTemp(rARM_LR);
  return true;
}

LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
  return NewLIR3(kThumb2Vldms, r_base.GetReg(), fr0, count);
}

LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
  return NewLIR3(kThumb2Vstms, r_base.GetReg(), fr0, count);
}

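// For a literal of the form (1 << first_bit) + (1 << second_bit) this emits
// (src + (src << (second_bit - first_bit))) << first_bit; for example first_bit == 1 and
// second_bit == 3 (lit == 10) becomes (src + (src << 2)) << 1.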
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
  RegStorage t_reg = AllocTemp();
  NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), reg.GetLowReg(), reg.GetHighReg(), 0);
  FreeTemp(t_reg);
  GenDivZeroCheck(kCondEq);
}

// Test suspend flag, return target of taken suspend branch
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  return OpCondBranch(c_code, target);
}

void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
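  // kISH is a full barrier within the inner shareable domain; kISHST orders only
  // store-to-store within that domain, which is all kStoreStore needs.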
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier != nullptr && (barrier->opcode != kThumb2Dmb || barrier->operands[0] != dmb_flavor))) {
    barrier = NewLIR1(kThumb2Dmb, dmb_flavor);
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
#endif
}

void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap
  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
    RegStorage t_reg = AllocTemp();
    // Save src.hi before the low-word subtract below clobbers it.
    OpRegCopy(t_reg, rl_src.reg.GetHigh());
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}

void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
    /*
     * tmp1     = src1.hi * src2.lo;  // src1.hi is no longer needed
     * dest     = src1.lo * src2.lo;
     * tmp1    += src1.lo * src2.hi;
     * dest.hi += tmp1;
     *
     * To pull off inline multiply, we have a worst-case requirement of 7 temporary
     * registers.  Normally for Arm, we get 5.  We can get to 6 by including
     * lr in the temp set.  The only problematic case is all operands and result are
     * distinct, and none have been promoted.  In that case, we can succeed by aggressively
     * freeing operand temp registers after they are no longer needed.  All other cases
     * can proceed normally.  We'll just punt on the case of the result having a misaligned
     * overlap with either operand and send that case to a runtime handler.
     */
    RegLocation rl_result;
    if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
      FlushAllRegs();
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
      rl_result = GetReturnWide(false);
      StoreValueWide(rl_dest, rl_result);
      return;
    }

    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_src2 = LoadValueWide(rl_src2, kCoreReg);

    int reg_status = 0;
    RegStorage res_lo;
    RegStorage res_hi;
    bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
        !IsTemp(rl_dest.reg.GetLowReg()) && !IsTemp(rl_dest.reg.GetHighReg());
    bool src1_promoted = !IsTemp(rl_src1.reg.GetLowReg()) && !IsTemp(rl_src1.reg.GetHighReg());
    bool src2_promoted = !IsTemp(rl_src2.reg.GetLowReg()) && !IsTemp(rl_src2.reg.GetHighReg());
    // Check if rl_dest is *not* either operand and we have enough temp registers.
    if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
        (dest_promoted || src1_promoted || src2_promoted)) {
      // In this case, we do not need to manually allocate temp registers for result.
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      res_lo = rl_result.reg.GetLow();
      res_hi = rl_result.reg.GetHigh();
    } else {
      res_lo = AllocTemp();
      if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
        // In this case, we have enough temp registers to be allocated for result.
        res_hi = AllocTemp();
        reg_status = 1;
      } else {
        // In this case, all temps are now allocated.
        // res_hi will be allocated after we can free src1_hi.
        reg_status = 2;
      }
    }

    // Temporarily add LR to the temp pool, and assign it to tmp1
    MarkTemp(rARM_LR);
    FreeTemp(rARM_LR);
    RegStorage tmp1 = rs_rARM_LR;
    LockTemp(rARM_LR);

    if (rl_src1.reg == rl_src2.reg) {
      DCHECK(res_hi.Valid());
      DCHECK(res_lo.Valid());
      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
              rl_src1.reg.GetLowReg());
      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
    } else {
      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
      if (reg_status == 2) {
        DCHECK(!res_hi.Valid());
        DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
        DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
        FreeTemp(rl_src1.reg.GetHighReg());
        res_hi = AllocTemp();
      }
      DCHECK(res_hi.Valid());
      DCHECK(res_lo.Valid());
      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
              rl_src1.reg.GetLowReg());
      NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
              tmp1.GetReg());
      NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
      if (reg_status == 2) {
        // Clobber rl_src1 since it was corrupted.
        FreeTemp(rl_src1.reg);
        Clobber(rl_src1.reg);
      }
    }

    // Now, restore lr to its non-temp status.
    FreeTemp(tmp1);
    Clobber(rARM_LR);
    UnmarkTemp(rARM_LR);

    if (reg_status != 0) {
      // We had manually allocated registers for rl_result.
      // Now construct a RegLocation.
      rl_result = GetReturnWide(false);  // Just using as a template.
      rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
    }

    StoreValueWide(rl_dest, rl_result);
}

void ArmMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
}

void ArmMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
}

void ArmMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
}

void ArmMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
}

void ArmMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
}

/*
 * Generate array load
 */
void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
      FreeTemp(rl_index.reg.GetReg());
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }
    if (rl_dest.wide) {
      LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg, INVALID_SREG);
      MarkPossibleNullPointerException(opt_flags);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
      MarkPossibleNullPointerException(opt_flags);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg.GetReg());
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
    Clobber(rl_array.reg.GetReg());
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }

    if (rl_src.wide) {
      StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}


void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), rl_src.reg.GetLow(),
                         EncodeShift(kArmLsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}

void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  uint32_t val_lo = Low32Bits(val);
  uint32_t val_hi = High32Bits(val);
  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = ModifiedImmediate(val_hi);
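  // ModifiedImmediate() yields a negative value when the constant has no Thumb2
  // modified-immediate encoding; the bail-out checks below rely on that.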

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art