// int_arm.cc revision a9a8254c920ce8e22210abfc16c9842ce0aea28f
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse.  The condition applies to the first instruction, which
 * is executed if the condition is met.  The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
       (1 << (3 - strlen(guide)));
  return NewLIR2(kThumb2It, code, mask);
}
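
// Worked example (illustrative): OpIT(kCondEq, "E") requests an ITE EQ block.
// kArmCondEq is 0x0, so cond_bit == 0 and alt_bit == 1; the single 'E' sets
// mask3 = alt_bit = 1 and the terminator lands in bit 2, giving mask = 0b1100.
// The first instruction in the shadow executes if EQ, the second if NE.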

/*
 * 64-bit 3-way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  int t_reg = AllocTemp();
  LoadConstant(t_reg, -1);
  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  LIR* branch1 = OpCondBranch(kCondLt, NULL);
  LIR* branch2 = OpCondBranch(kCondGt, NULL);
  OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
  LIR* branch3 = OpCondBranch(kCondEq, NULL);

  OpIT(kCondHi, "E");
  NewLIR2(kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
  LoadConstant(t_reg, 1);
  GenBarrier();

  target2 = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
  rl_temp.low_reg = t_reg;
  StoreValue(rl_dest, rl_temp);
  FreeTemp(t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}

void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                          int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  DCHECK_GE(ModifiedImmediate(val_lo), 0);
  DCHECK_GE(ModifiedImmediate(val_hi), 0);
  LIR* taken = &block_label_list_[bb->taken->id];
  LIR* not_taken = &block_label_list_[bb->fall_through->id];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  int32_t low_reg = rl_src1.low_reg;
  int32_t high_reg = rl_src1.high_reg;

  switch (ccode) {
    case kCondEq:
    case kCondNe:
      LIR* target;
      ConditionCode condition;
      if (ccode == kCondEq) {
        target = not_taken;
        condition = kCondEq;
      } else {
        target = taken;
        condition = kCondNe;
      }
      if (val == 0) {
        int t_reg = AllocTemp();
        NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
        FreeTemp(t_reg);
        OpCondBranch(condition, taken);
        return;
      }
      OpCmpImmBranch(kCondNe, high_reg, val_hi, target);
      break;
    case kCondLt:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}
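
// Note on the ccode rewrites above: the high words are compared as signed
// values, but once the high words are equal the result depends only on an
// unsigned comparison of the low words.  That is why kCondLt/kCondLe/kCondGt/
// kCondGe are rewritten to kCondCc/kCondLs/kCondHi/kCondCs for the final
// low-word compare-and-branch.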

void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  // Temporary debugging code
  int dest_sreg = mir->ssa_rep->defs[0];
  if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
    LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
    LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
    LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
    if (mir->ssa_rep->num_uses == 1) {
      LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
    } else {
      LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
                << mir->ssa_rep->uses[2];
    }
    CHECK(false) << "Invalid target sreg on Select.";
  }
  // End temporary debugging code
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    if ((true_val == 1) && (false_val == 0)) {
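      // Boolean-result special case, assuming kOpRsub emits a flag-setting
      // rsbs here: rsbs computes 1 - src, leaving carry set (no borrow) for
      // src == 0 (result 1) and src == 1 (result 0), and carry clear for any
      // src >= 2 unsigned, where the IT CC / mov #0 pair forces the result
      // to 0.  Net effect: result = (src == 0) ? 1 : 0.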
      OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
      OpIT(kCondCc, "");
      LoadConstant(rl_result.low_reg, 0);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
      OpRegImm(kOpCmp, rl_src.low_reg, 0);
      OpIT(kCondEq, "E");
      LoadConstant(rl_result.low_reg, true_val);
      LoadConstant(rl_result.low_reg, false_val);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    } else {
      // Unlikely case - could be tuned.
      int t_reg1 = AllocTemp();
      int t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.low_reg, 0);
      OpIT(kCondEq, "E");
      OpRegCopy(rl_result.low_reg, t_reg1);
      OpRegCopy(rl_result.low_reg, t_reg2);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.low_reg, 0);
    if (rl_result.low_reg == rl_true.low_reg) {  // Is the "true" case already in place?
      OpIT(kCondNe, "");
      OpRegCopy(rl_result.low_reg, rl_false.low_reg);
    } else if (rl_result.low_reg == rl_false.low_reg) {  // False case in place?
      OpIT(kCondEq, "");
      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
    } else {  // Normal - select between the two.
      OpIT(kCondEq, "E");
      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
      OpRegCopy(rl_result.low_reg, rl_false.low_reg);
    }
    GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
  }
  StoreValue(rl_dest, rl_result);
}

void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg) &&
        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken->id];
  LIR* not_taken = &block_label_list_[bb->fall_through->id];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
                                LIR* target) {
  LIR* branch;
  int mod_imm;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  /*
   * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
   * compare-and-branch if zero is ideal if it will reach.  However, because null checks
   * branch forward to a launch pad, they will frequently not reach - and thus have to
   * be converted to a long form during assembly (which will trigger another assembly
   * pass).  Here we estimate the branch distance for checks, and if large directly
   * generate the long form in an attempt to avoid an extra assembly pass.
   * TODO: consider interspersing launchpads in code following unconditional branches.
   */
  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
  skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
  if (!skip && (ARM_LOWREG(reg)) && (check_value == 0) &&
     ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg, 0);
  } else {
    mod_imm = ModifiedImmediate(check_value);
    if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
      NewLIR2(kThumbCmpRI8, reg, check_value);
    } else if (mod_imm >= 0) {
      NewLIR2(kThumb2CmpRI12, reg, mod_imm);
    } else {
      int t_reg = AllocTemp();
      LoadConstant(t_reg, check_value);
      OpRegReg(kOpCmp, reg, t_reg);
    }
    branch = NewLIR2(kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}
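
// Why the "skip" heuristic above: Thumb2 CBZ/CBNZ can only branch forward by
// 0-126 bytes.  Throw targets live in launch pads emitted after the method
// body, so when more than ~64 16-bit code units of code remain, a CBZ to a
// throw target is unlikely to reach; emitting cmp + b<cond> directly avoids
// the extra assembly pass that an out-of-range fixup would trigger.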

LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
  LIR* res;
  int opcode;
  if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
    return OpFpRegCopy(r_dest, r_src);
  if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
    opcode = kThumbMovRR;
  else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
    opcode = kThumbMovRR_H2H;
  else if (ARM_LOWREG(r_dest))
    opcode = kThumbMovRR_H2L;
  else
    opcode = kThumbMovRR_L2H;
  res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) {
  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
  AppendLIR(res);
  return res;
}

void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                               int src_hi) {
  bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
  bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
  DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
  DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
  if (dest_fp) {
    if (src_fp) {
      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
    } else {
      NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
    }
  } else {
    if (src_fp) {
      NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
    } else {
      // Handle overlap
      if (src_hi == dest_lo) {
        OpRegCopy(dest_hi, src_hi);
        OpRegCopy(dest_lo, src_lo);
      } else {
        OpRegCopy(dest_lo, src_lo);
        OpRegCopy(dest_hi, src_hi);
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  int r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int r_hi = AllocTemp();
  int r_lo = AllocTemp();
  NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
               rl_src.low_reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
               EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
               EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
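
// Worked example of the Divide3 pattern (illustrative): for lit == 3 the
// magic constant 0x55555556 is roughly (2^32 + 2) / 3, and the quotient is
// hi32(n * magic) - (n >> 31).  For n == 7, n * 0x55555556 == 0x25555555A,
// so r_hi == 2 and the sign term is 0, giving q == 2.  For n == -7, r_hi is
// -3 and q == -3 - (-1) == -2, matching Java's round-toward-zero semantics.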

LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
                    int reg1, int base, int offset, ThrowKind kind) {
  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
  return NULL;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
                                     bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
  return rl_dest;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
                                  bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
  return rl_dest;
}

bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
  GenBarrier();
  StoreValue(rl_dest, rl_result);
  return true;
}
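
// For reference, the is_min case assembles to roughly:
//     cmp   src1, src2
//     ite   gt
//     movgt result, src2
//     movle result, src1
// i.e. result = (src1 > src2) ? src2 : src1.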

void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm";
}

void ArmMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}

bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int or Object
  RegLocation rl_src_new_value = info->args[5];  // int or Object
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Release store semantics, get the barrier out of the way.  TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_new_value = LoadValue(rl_src_new_value, kCoreReg);

  if (need_write_barrier && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  int r_ptr = AllocTemp();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.low_reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.low_reg);

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadConstant(rl_result.low_reg, 0);  // r_result := 0

  // while ([r_ptr] == rExpected && r_result == 0) {
  //   [r_ptr] <- r_new_value && r_result := success ? 0 : 1
  //   r_result ^= 1
  // }
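  // Reminder of the strex convention relied on below: strex writes 0 to its
  // status register on success and 1 on failure, so the eor #1 in the IT
  // shadow leaves rl_result == 1 exactly when the store succeeded.  When the
  // compare fails, the whole shadow (strex/eor/cmp) is skipped, the flags
  // still reflect old != expected, and the beq falls through with result 0.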
  int r_old_value = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);
  NewLIR3(kThumb2Ldrex, r_old_value, r_ptr, 0);

  RegLocation rl_expected = LoadValue(rl_src_expected, kCoreReg);
  OpRegReg(kOpCmp, r_old_value, rl_expected.low_reg);
  FreeTemp(r_old_value);  // Now unneeded.
  OpIT(kCondEq, "TT");
  NewLIR4(kThumb2Strex /* eq */, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
  FreeTemp(r_ptr);  // Now unneeded.
  OpRegImm(kOpXor /* eq */, rl_result.low_reg, 1);
  OpRegImm(kOpCmp /* eq */, rl_result.low_reg, 0);
  OpCondBranch(kCondEq, target);

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
}

LIR* ArmMir2Lir::OpVldm(int rBase, int count) {
  return NewLIR3(kThumb2Vldms, rBase, fr0, count);
}

LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
  return NewLIR3(kThumb2Vstms, rBase, fr0, count);
}

void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
  }
}
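
// Illustrative instance: for lit == 10 (binary 1010, so first_bit == 1 and
// second_bit == 3) this emits
//     add  result, src, src, lsl #2   @ result = 5 * src
//     lsl  result, result, #1         @ result = 10 * src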

void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
  int t_reg = AllocTemp();
  NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
  FreeTemp(t_reg);
  GenCheck(kCondEq, kThrowDivZero);
}

// Test suspend flag, return target of taken suspend branch
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
  return OpCondBranch(c_code, target);
}

void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kSY; break;
    case kLoadLoad: dmb_flavor = kSY; break;
    case kStoreStore: dmb_flavor = kST; break;
    case kStoreLoad: dmb_flavor = kSY; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }
  LIR* dmb = NewLIR1(kThumb2Dmb, dmb_flavor);
  dmb->u.m.def_mask = ENCODE_ALL;
#endif
}
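
// Mapping note: kST (a DMB ...ST variant) orders only store->store; all other
// kinds currently fall back to the full kSY system barrier, a conservative
// choice flagged by the TODO above.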

void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap
  if (rl_result.low_reg == rl_src.high_reg) {
    int t_reg = AllocTemp();
    OpRegCopy(t_reg, rl_src.high_reg);  // Save the high word before the sub clobbers it.
    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}

 /*
  * Check to see if a result pair has a misaligned overlap with an operand pair.  This
  * is not usual for dx to generate, but it is legal (for now).  In a future rev of
  * dex, we'll want to make this case illegal.
  */
bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
    /*
     * To pull off inline multiply, we have a worst-case requirement of 8 temporary
     * registers.  Normally for Arm, we get 5.  We can get to 6 by including
     * lr in the temp set.  The only problematic case is all operands and result are
     * distinct, and none have been promoted.  In that case, we can succeed by aggressively
     * freeing operand temp registers after they are no longer needed.  All other cases
     * can proceed normally.  We'll just punt on the case of the result having a misaligned
     * overlap with either operand and send that case to a runtime handler.
     */
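    /*
     * The general-case sequence below computes the low 64 bits of
     * src1 * src2 as:
     *   res_lo = lo32(lo1 * lo2)
     *   res_hi = hi32(lo1 * lo2) + lo1 * hi2 + hi1 * lo2   (mod 2^32)
     * using one umull for the full low x low product and mul/mla for the two
     * cross terms that only affect the high word.
     */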
    RegLocation rl_result;
    if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
      FlushAllRegs();
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
      rl_result = GetReturnWide(false);
      StoreValueWide(rl_dest, rl_result);
      return;
    }
    // Temporarily add LR to the temp pool, and assign it to tmp1
    MarkTemp(rARM_LR);
    FreeTemp(rARM_LR);
    int tmp1 = rARM_LR;
    LockTemp(rARM_LR);

    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_src2 = LoadValueWide(rl_src2, kCoreReg);

    bool special_case = true;
    // If operands are the same, or any pair has been promoted we're not the special case.
    if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
        (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
        (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
      special_case = false;
    }
    // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
    int res_lo = AllocTemp();
    int res_hi;
    if (rl_src1.low_reg == rl_src2.low_reg) {
      res_hi = AllocTemp();
      NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
    } else {
      // In the special case, all temps are now allocated
      NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
      if (special_case) {
        DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
        DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
        FreeTemp(rl_src1.high_reg);
      }
      res_hi = AllocTemp();

      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
      NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
      NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
      if (special_case) {
        FreeTemp(rl_src1.low_reg);
        Clobber(rl_src1.low_reg);
        Clobber(rl_src1.high_reg);
      }
    }
    FreeTemp(tmp1);
    rl_result = GetReturnWide(false);  // Just using as a template.
    rl_result.low_reg = res_lo;
    rl_result.high_reg = res_hi;
    StoreValueWide(rl_dest, rl_result);
    // Now, restore lr to its non-temp status.
    Clobber(rARM_LR);
    UnmarkTemp(rARM_LR);
}

void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
}

void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
}

void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
}

void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
}

void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
}

/*
 * Generate array load
 */
void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    int reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.low_reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                       EncodeShift(kArmLsl, scale));
      FreeTemp(rl_index.low_reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }
    if (rl_dest.wide) {
      LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    int reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    FreeTemp(rl_array.low_reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      // TODO: change kCondCS to a more meaningful name, is the sense of
      // carry-set/clear flipped?
      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  int reg_ptr;
  if (constant_index) {
    reg_ptr = rl_array.low_reg;
  } else if (IsTemp(rl_array.low_reg)) {
    Clobber(rl_array.low_reg);
    reg_ptr = rl_array.low_reg;
  } else {
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                       EncodeShift(kArmLsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }

    if (rl_src.wide) {
      StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
    }
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
                     scale, size);
  }
  if (!constant_index) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.low_reg, rl_array.low_reg);
  }
}

void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
        OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
        LoadConstant(rl_result.low_reg, 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
        LoadConstant(rl_result.low_reg, 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
                         EncodeShift(kArmLsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
      } else {
        int t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
        LoadConstant(rl_result.high_reg, 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
        LoadConstant(rl_result.high_reg, 0);
      } else {
        int t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}
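
// Illustrative instances of the shift_amount > 31 decomposition above:
// SHL_LONG by 40 becomes result.hi = src.lo << 8 with result.lo = 0, and
// USHR_LONG by 40 becomes result.lo = src.hi >> 8 (logical) with
// result.hi = 0.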

void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      RegLocation rl_temp = rl_src1;
      rl_src1 = rl_src2;
      rl_src2 = rl_temp;
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  uint32_t val_lo = Low32Bits(val);
  uint32_t val_hi = High32Bits(val);
  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = ModifiedImmediate(val_hi);

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
        OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
      }
      if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
        OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
      OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
        OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
        OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
      break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art