int_arm.cc revision b14329f90f725af0f67c45dfcb94933a426d63ce
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse.  The condition applies to the first instruction, which
 * is executed if the condition is met.  The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
         (1 << (3 - strlen(guide)));
  return NewLIR2(kThumb2It, code, mask);
}
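
// Worked example (illustrative, not from the original source): for
// ccode == kCondEq (ARM condition 0b0000, so cond_bit == 0 and alt_bit == 1)
// with guide "E", strlen(guide) == 1 and mask3 == alt_bit == 1, giving
// mask == (1 << 3) | (1 << (3 - 1)) == 0b1100 - the Thumb2 encoding of
// "ITE eq": the first instruction in the shadow executes on EQ, the second
// on NE.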

void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = static_cast<ArmConditionCode>(it->operands[0]);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(new_guide)) {
    case 3:
      mask1 = (new_guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (new_guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (new_guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in UpdateIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
         (1 << (3 - strlen(new_guide)));
  it->operands[1] = mask;
}

void ArmMir2Lir::OpEndIT(LIR* it) {
  // TODO: use the 'it' pointer to do some checks with the LIR, for example
  //       we could check that the number of instructions matches the mask
  //       in the IT instruction.
  CHECK(it != nullptr);
  GenBarrier();
}

/*
 * 64-bit 3-way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  RegStorage t_reg = AllocTemp();
  LoadConstant(t_reg, -1);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  LIR* branch1 = OpCondBranch(kCondLt, NULL);
  LIR* branch2 = OpCondBranch(kCondGt, NULL);
  OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  LIR* branch3 = OpCondBranch(kCondEq, NULL);

  LIR* it = OpIT(kCondHi, "E");
  NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
  LoadConstant(t_reg, 1);
  OpEndIT(it);

  target2 = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
  rl_temp.reg.SetReg(t_reg.GetReg());
  StoreValue(rl_dest, rl_temp);
  FreeTemp(t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}

void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                          int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  DCHECK_GE(ModifiedImmediate(val_lo), 0);
  DCHECK_GE(ModifiedImmediate(val_hi), 0);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegStorage low_reg = rl_src1.reg.GetLow();
  RegStorage high_reg = rl_src1.reg.GetHigh();

  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
    RegStorage t_reg = AllocTemp();
    NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), low_reg.GetReg(), high_reg.GetReg(), 0);
    FreeTemp(t_reg);
    OpCondBranch(ccode, taken);
    return;
  }

  switch (ccode) {
    case kCondEq:
    case kCondNe:
      OpCmpImmBranch(kCondNe, high_reg, val_hi, (ccode == kCondEq) ? not_taken : taken);
      break;
    case kCondLt:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}
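
// Note on the fused 64-bit compare above (illustrative): the high words are
// compared with signed conditions, and only when they are equal does control
// fall through to the low-word compare, which must use the unsigned
// counterpart (e.g. kCondLt -> kCondUlt) because the low words of a two's
// complement pair compare as unsigned 32-bit values.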

void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  ConditionCode ccode = mir->meta.ccode;
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Change kCondNe to kCondEq for the special cases below.
    if (ccode == kCondNe) {
      ccode = kCondEq;
      std::swap(true_val, false_val);
    }
    bool cheap_false_val = InexpensiveConstantInt(false_val);
    if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      LIR* it = OpIT(true_val == 0 ? kCondNe : kCondUge, "");
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      LIR* it = OpIT(kCondLs, "");
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
      OpRegImm(kOpCmp, rl_src.reg, 0);
      LIR* it = OpIT(ccode, "E");
      LoadConstant(rl_result.reg, true_val);
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else {
      // Unlikely case - could be tuned.
      RegStorage t_reg1 = AllocTemp();
      RegStorage t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.reg, 0);
      LIR* it = OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, t_reg1);
      OpRegCopy(rl_result.reg, t_reg2);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.reg, 0);
    LIR* it = nullptr;
    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
      it = OpIT(NegateComparison(ccode), "");
      OpRegCopy(rl_result.reg, rl_false.reg);
    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
      it = OpIT(ccode, "");
      OpRegCopy(rl_result.reg, rl_true.reg);
    } else {  // Normal - select between the two.
      it = OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, rl_true.reg);
      OpRegCopy(rl_result.reg, rl_false.reg);
    }
    OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
  }
  StoreValue(rl_dest, rl_result);
}

void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg) &&
        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  /*
   * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
   * compare-and-branch if zero is ideal if it will reach.  However, because null checks
   * branch forward to a slow path, they will frequently not reach - and thus have to
   * be converted to a long form during assembly (which will trigger another assembly
   * pass).  Here we estimate the branch distance for checks, and if large directly
   * generate the long form in an attempt to avoid an extra assembly pass.
   * TODO: consider interspersing slowpaths in code following unconditional branches.
   */
  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
  skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
  if (!skip && reg.Low8() && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}

LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  LIR* res;
  int opcode;
  // If src or dest is a pair, we'll be using low reg.
  if (r_dest.IsPair()) {
    r_dest = r_dest.GetLow();
  }
  if (r_src.IsPair()) {
    r_src = r_src.GetLow();
  }
  if (r_dest.IsFloat() || r_src.IsFloat())
    return OpFpRegCopy(r_dest, r_src);
  if (r_dest.Low8() && r_src.Low8())
    opcode = kThumbMovRR;
  else if (!r_dest.Low8() && !r_src.Low8())
    opcode = kThumbMovRR_H2H;
  else if (r_dest.Low8())
    opcode = kThumbMovRR_H2L;
  else
    opcode = kThumbMovRR_L2H;
  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

void ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    bool dest_fp = r_dest.IsFloat();
    bool src_fp = r_src.IsFloat();
    DCHECK(r_dest.Is64Bit());
    DCHECK(r_src.Is64Bit());
    if (dest_fp) {
      if (src_fp) {
        OpRegCopy(r_dest, r_src);
      } else {
        NewLIR3(kThumb2Fmdrr, r_dest.GetReg(), r_src.GetLowReg(), r_src.GetHighReg());
      }
    } else {
      if (src_fp) {
        NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_src.GetReg());
      } else {
        // Handle overlap
        if (r_src.GetHighReg() == r_dest.GetLowReg()) {
          DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
        } else {
          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
        }
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_hi = AllocTemp();
  RegStorage r_lo = AllocTemp();

  // rl_dest and rl_src might overlap.
  // Reuse r_hi to hold the div result in the remainder case.
  RegStorage r_div_result = is_div ? rl_result.reg : r_hi;

  NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, r_div_result, r_hi, rl_src.reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }

  if (!is_div) {
    // div_result = src / lit
    // tmp1 = div_result * lit
    // dest = src - tmp1
    RegStorage tmp1 = r_lo;
    EasyMultiplyOp ops[2];

    bool canEasyMultiply = GetEasyMultiplyTwoOps(lit, ops);
    DCHECK_NE(canEasyMultiply, false);

    GenEasyMultiplyTwoOps(tmp1, r_div_result, ops);
    OpRegRegReg(kOpSub, rl_result.reg, rl_src.reg, tmp1);
  }

  StoreValue(rl_dest, rl_result);
  return true;
}
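
// Worked example (illustrative): for lit == 3, magic == 0x55555556 and smull
// yields r_hi == (0x55555556 * src) >> 32. The Divide3 pattern then computes
// r_hi - (src >> 31); e.g. src == 9 gives r_hi == 3 and a sign correction of
// 0, so the quotient is 3. See Hacker's Delight, 10-4, for the derivation.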

// Try to convert lit to a single RegRegRegShift/RegRegShift form.
bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
  if (IsPowerOfTwo(lit)) {
    op->op = kOpLsl;
    op->shift = LowestSetBit(lit);
    return true;
  }

  if (IsPowerOfTwo(lit - 1)) {
    op->op = kOpAdd;
    op->shift = LowestSetBit(lit - 1);
    return true;
  }

  if (IsPowerOfTwo(lit + 1)) {
    op->op = kOpRsub;
    op->shift = LowestSetBit(lit + 1);
    return true;
  }

  op->op = kOpInvalid;
  op->shift = 0;
  return false;
}

// Try to convert lit to one or two RegRegRegShift/RegRegShift forms.
bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
  if (GetEasyMultiplyOp(lit, &ops[0])) {
    ops[1].op = kOpInvalid;
    ops[1].shift = 0;
    return true;
  }

  int lit1 = lit;
  uint32_t shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpLsl;
    ops[1].shift = shift;
    return true;
  }

  lit1 = lit - 1;
  shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpAdd;
    ops[1].shift = shift;
    return true;
  }

  lit1 = lit + 1;
  shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpRsub;
    ops[1].shift = shift;
    return true;
  }

  return false;
}
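
// Decomposition example (illustrative): lit == 10 strips the low set bit
// first (10 >> 1 == 5, and 5 == 4 + 1), so ops[0] becomes kOpAdd with
// shift 2 and ops[1] becomes kOpLsl with shift 1:
//   tmp1 = src + (src << 2)   // 5 * src
//   dest = tmp1 << 1          // 10 * src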

// Generate instructions to do multiply.
// An additional temporary register is required
// if two instructions are needed and src/dest overlap.
void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
  // tmp1 = ( src << shift1) + [ src | -src | 0 ]
  // dest = (tmp1 << shift2) + [ src | -src | 0 ]

  RegStorage r_tmp1;
  if (ops[1].op == kOpInvalid) {
    r_tmp1 = r_dest;
  } else if (r_dest.GetReg() != r_src.GetReg()) {
    r_tmp1 = r_dest;
  } else {
    r_tmp1 = AllocTemp();
  }

  switch (ops[0].op) {
    case kOpLsl:
      OpRegRegImm(kOpLsl, r_tmp1, r_src, ops[0].shift);
      break;
    case kOpAdd:
      OpRegRegRegShift(kOpAdd, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
      break;
    case kOpRsub:
      OpRegRegRegShift(kOpRsub, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
      break;
    default:
      DCHECK_EQ(ops[0].op, kOpInvalid);
      break;
  }

  switch (ops[1].op) {
    case kOpInvalid:
      return;
    case kOpLsl:
      OpRegRegImm(kOpLsl, r_dest, r_tmp1, ops[1].shift);
      break;
    case kOpAdd:
      OpRegRegRegShift(kOpAdd, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
      break;
    case kOpRsub:
      OpRegRegRegShift(kOpRsub, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected opcode passed to GenEasyMultiplyTwoOps";
      break;
  }
}

bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  EasyMultiplyOp ops[2];

  if (!GetEasyMultiplyTwoOps(lit, ops)) {
    return false;
  }

  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  GenEasyMultiplyTwoOps(rl_result.reg, rl_src.reg, ops);
  StoreValue(rl_dest, rl_result);
  return true;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
  return rl_dest;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
  return rl_dest;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
                                  bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Simple case, use sdiv instruction.
    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
  } else {
    // Remainder case, use the following code:
    // temp = reg1 / reg2      - integer division
    // temp = temp * reg2
    // dest = reg1 - temp

    RegStorage temp = AllocTemp();
    OpRegRegReg(kOpDiv, temp, reg1, reg2);
    OpRegReg(kOpMul, temp, reg2);
    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
    FreeTemp(temp);
  }

  return rl_result;
}

bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  LIR* it = OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
  OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
  OpEndIT(it);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == k64) {
    // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
    if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
      Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
      Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
    } else {
      Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
      Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  if (size == k64) {
    // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
    StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
  }
  return true;
}

void ArmMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm";
}

void ArmMir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}

void ArmMir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be called.";
}

bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // We have only 5 temporary registers available and actually only 4 if the InlineTarget
  // above locked one of the temps. For a straightforward CAS64 we need 7 registers:
  // r_ptr (1), new_value (2), expected(2) and ldrexd result (2). If neither expected nor
  // new_value is in a non-temp core register we shall reload them in the ldrex/strex loop
  // into the same temps, reducing the number of required temps down to 5. We shall work
  // around the potentially locked temp by using LR for r_ptr, unconditionally.
  // TODO: Pass information about the need for more temps to the stack frame generation
  // code so that we can rely on being able to allocate enough temps.
  DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
  MarkTemp(rs_rARM_LR);
  FreeTemp(rs_rARM_LR);
  LockTemp(rs_rARM_LR);
  bool load_early = true;
  if (is_long) {
    RegStorage expected_reg = rl_src_expected.reg.IsPair() ? rl_src_expected.reg.GetLow() :
        rl_src_expected.reg;
    RegStorage new_val_reg = rl_src_new_value.reg.IsPair() ? rl_src_new_value.reg.GetLow() :
        rl_src_new_value.reg;
    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !expected_reg.IsFloat();
    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !new_val_reg.IsFloat();
    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);

    if (!expected_is_good_reg && !new_value_is_good_reg) {
      // None of expected/new_value is non-temp reg, need to load both late
      load_early = false;
      // Make sure they are not in the temp regs and the load will not be skipped.
      if (expected_is_core_reg) {
        FlushRegWide(rl_src_expected.reg);
        ClobberSReg(rl_src_expected.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
        rl_src_expected.location = kLocDalvikFrame;
      }
      if (new_value_is_core_reg) {
        FlushRegWide(rl_src_new_value.reg);
        ClobberSReg(rl_src_new_value.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
        rl_src_new_value.location = kLocDalvikFrame;
      }
    }
  }

  // Release store semantics, get the barrier out of the way.  TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_new_value;
  if (!is_long) {
    rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
  } else if (load_early) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  RegStorage r_ptr = rs_rARM_LR;
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  RegLocation rl_expected;
  if (!is_long) {
    rl_expected = LoadValue(rl_src_expected, kCoreReg);
  } else if (load_early) {
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
    RegStorage low_reg = AllocTemp();
    RegStorage high_reg = AllocTemp();
    rl_new_value.reg = RegStorage::MakeRegPair(low_reg, high_reg);
    rl_expected = rl_new_value;
  }

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);

  LIR* it = nullptr;
  if (is_long) {
    RegStorage r_tmp_high = AllocTemp();
    if (!load_early) {
      LoadValueDirectWide(rl_src_expected, rl_expected.reg);
    }
    NewLIR3(kThumb2Ldrexd, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
    if (!load_early) {
      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
    }
    // Make sure we use ORR that sets the ccode
    if (r_tmp.Low8() && r_tmp_high.Low8()) {
      NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
    } else {
      NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
    }
    FreeTemp(r_tmp_high);  // Now unneeded

    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
    it = OpIT(kCondEq, "T");
    NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(),
            rl_new_value.reg.GetHighReg(), r_ptr.GetReg());

  } else {
    NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
    OpRegReg(kOpSub, r_tmp, rl_expected.reg);
    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
    it = OpIT(kCondEq, "T");
    NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
  }

  // Still one conditional left from OpIT(kCondEq, "T") from either branch
  OpRegImm(kOpCmp /* eq */, r_tmp, 1);
  OpEndIT(it);

  OpCondBranch(kCondEq, target);

  if (!load_early) {
    FreeTemp(rl_expected.reg);  // Now unneeded.
  }

  // result := (tmp1 != 0) ? 0 : 1;
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  it = OpIT(kCondUlt, "");
  LoadConstant(rl_result.reg, 0);  /* cc */
  FreeTemp(r_tmp);  // Now unneeded.
  OpEndIT(it);  // Barrier to terminate OpIT.

  StoreValue(rl_dest, rl_result);

  // Now, restore lr to its non-temp status.
  Clobber(rs_rARM_LR);
  UnmarkTemp(rs_rARM_LR);
  return true;
}

LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
  return NewLIR3(kThumb2Vldms, r_base.GetReg(), rs_fr0.GetReg(), count);
}

LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
  return NewLIR3(kThumb2Vstms, r_base.GetReg(), rs_fr0.GetReg(), count);
}

void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}
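
// Worked example (illustrative): lit == 10 == (1 << 3) + (1 << 1) has
// first_bit == 1 and second_bit == 3, so the code above emits
//   result = src + (src << 2)   // 5 * src
//   result = result << 1        // 10 * src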

void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
  RegStorage t_reg = AllocTemp();
  NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), reg.GetLowReg(), reg.GetHighReg(), 0);
  FreeTemp(t_reg);
  GenDivZeroCheck(kCondEq);
}

// Test suspend flag, return target of taken suspend branch
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
  NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  return OpCondBranch(c_code, target);
}

bool ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr || barrier->opcode != kThumb2Dmb || barrier->operands[0] != dmb_flavor) {
    barrier = NewLIR1(kThumb2Dmb, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
  return ret;
#else
  return false;
#endif
}
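
// For reference (assumption based on the ARMv7 barrier options, not stated in
// the original source): kISH is a full barrier in the inner shareable domain
// while kISHST orders only store-store, which is why kStoreStore can use the
// cheaper flavor and the other kinds map to the full barrier.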

void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap
  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
    RegStorage t_reg = AllocTemp();
    OpRegCopy(t_reg, rl_src.reg.GetHigh());  // Save src.hi before the sub below clobbers it.
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}

void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
    /*
     * tmp1     = src1.hi * src2.lo;  // src1.hi is no longer needed
     * dest     = src1.lo * src2.lo;
     * tmp1    += src1.lo * src2.hi;
     * dest.hi += tmp1;
     *
     * To pull off inline multiply, we have a worst-case requirement of 7 temporary
     * registers.  Normally for Arm, we get 5.  We can get to 6 by including
     * lr in the temp set.  The only problematic case is all operands and result are
     * distinct, and none have been promoted.  In that case, we can succeed by aggressively
     * freeing operand temp registers after they are no longer needed.  All other cases
     * can proceed normally.  We'll just punt on the case of the result having a misaligned
     * overlap with either operand and send that case to a runtime handler.
     */
    RegLocation rl_result;
    if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
      FlushAllRegs();
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
      rl_result = GetReturnWide(false);
      StoreValueWide(rl_dest, rl_result);
      return;
    }

    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_src2 = LoadValueWide(rl_src2, kCoreReg);

    int reg_status = 0;
    RegStorage res_lo;
    RegStorage res_hi;
    bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
        !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
    bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
    bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
    // Check if rl_dest is *not* either operand and we have enough temp registers.
    if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
        (dest_promoted || src1_promoted || src2_promoted)) {
      // In this case, we do not need to manually allocate temp registers for result.
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      res_lo = rl_result.reg.GetLow();
      res_hi = rl_result.reg.GetHigh();
    } else {
      res_lo = AllocTemp();
      if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
        // In this case, we have enough temp registers to be allocated for result.
        res_hi = AllocTemp();
        reg_status = 1;
      } else {
        // In this case, all temps are now allocated.
        // res_hi will be allocated after we can free src1_hi.
        reg_status = 2;
      }
    }

    // Temporarily add LR to the temp pool, and assign it to tmp1
    MarkTemp(rs_rARM_LR);
    FreeTemp(rs_rARM_LR);
    RegStorage tmp1 = rs_rARM_LR;
    LockTemp(rs_rARM_LR);

    if (rl_src1.reg == rl_src2.reg) {
      DCHECK(res_hi.Valid());
      DCHECK(res_lo.Valid());
      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
              rl_src1.reg.GetLowReg());
      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
    } else {
      NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
      if (reg_status == 2) {
        DCHECK(!res_hi.Valid());
        DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
        DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
        FreeTemp(rl_src1.reg.GetHigh());
        res_hi = AllocTemp();
      }
      DCHECK(res_hi.Valid());
      DCHECK(res_lo.Valid());
      NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
              rl_src1.reg.GetLowReg());
      NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
              tmp1.GetReg());
      NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
      if (reg_status == 2) {
        // Clobber rl_src1 since it was corrupted.
        FreeTemp(rl_src1.reg);
        Clobber(rl_src1.reg);
      }
    }

    // Now, restore lr to its non-temp status.
    FreeTemp(tmp1);
    Clobber(rs_rARM_LR);
    UnmarkTemp(rs_rARM_LR);

    if (reg_status != 0) {
      // We had manually allocated registers for rl_result.
      // Now construct a RegLocation.
      rl_result = GetReturnWide(false);  // Just using as a template.
      rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
    }

    StoreValueWide(rl_dest, rl_result);
}
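
// Sanity check of the decomposition above (illustrative): with 32-bit halves,
// (a_hi * 2^32 + a_lo) * (b_hi * 2^32 + b_lo) mod 2^64
//   == a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 32),
// which is exactly the umull low product plus the mla sum folded into the
// high result word.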

void ArmMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
}

void ArmMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
}

void ArmMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
}

void ArmMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
}

void ArmMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
}

/*
 * Generate array load
 */
void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps (4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }

    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(),
                         rl_src.reg.GetLow(), EncodeShift(kArmLsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}
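
// Shift decomposition example (illustrative): for SHL_LONG with
// 0 < n < 32 the cases above compute
//   dst.hi = (src.hi << n) | (src.lo >> (32 - n))
//   dst.lo = src.lo << n
// and for n >= 32 the low word is simply zero, with the high word taken
// from src.lo shifted by n - 32.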

void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  uint32_t val_lo = Low32Bits(val);
  uint32_t val_hi = High32Bits(val);
  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = ModifiedImmediate(val_hi);
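  // Note (assumption about ModifiedImmediate(), consistent with its use here
  // and in the DCHECKs above): it yields a non-negative encoding only when
  // the value fits a Thumb2 modified immediate (e.g. a rotated 8-bit pattern
  // such as 0x00ab00ab); a negative return forces the generic path below.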

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art