// utility_arm.cc revision 9cf44af1a223f905457688931317a4e4cb086a84
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "arm_lir.h"
18#include "codegen_arm.h"
19#include "dex/quick/mir_to_lir-inl.h"
20
21namespace art {
22
23/* This file contains codegen for the Thumb ISA. */
24
/*
 * Determine whether value (an IEEE-754 single bit pattern) can be encoded as
 * a Thumb2 VFP floating point immediate.  If not, return -1.  If so, return
 * the 8-bit a:bcdefgh encoding.
 */
static int32_t EncodeImmSingle(int32_t value) {
  const uint32_t v = static_cast<uint32_t>(value);
  // The low 19 mantissa bits must be zero for an exact 8-bit encoding.
  if ((v & 0x0007ffffu) != 0) {
    return -1;
  }
  const uint32_t sign = v >> 31;                 // "a"
  const uint32_t not_b = (v >> 30) & 1;          // must be the complement of b
  const uint32_t bit_b = (v >> 29) & 1;          // "b"
  const uint32_t smear = (v >> 25) & 0x1f;       // bits 25-29: b replicated
  const uint32_t slice = (v >> 19) & 0x3f;       // "cdefgh"
  if (bit_b != 0) {
    if ((not_b != 0) || (smear != 0x1f)) {
      return -1;
    }
  } else {
    if ((not_b != 1) || (smear != 0)) {
      return -1;
    }
  }
  return static_cast<int32_t>((sign << 7) | (bit_b << 6) | slice);
}
45
/*
 * Determine whether value (an IEEE-754 double bit pattern) can be encoded
 * as a Thumb2 floating point immediate.  If not, return -1.  If so return
 * the encoded 8-bit a:bcdefgh value.
 */
static int32_t EncodeImmDouble(int64_t value) {
  const uint64_t v = static_cast<uint64_t>(value);
  // The low 48 mantissa bits must be zero for an exact 8-bit encoding.
  if ((v & UINT64_C(0x0000ffffffffffff)) != 0) {
    return -1;
  }
  const int32_t sign = static_cast<int32_t>(v >> 63);           // "a"
  const int32_t not_b = static_cast<int32_t>((v >> 62) & 1);    // complement of b
  const int32_t bit_b = static_cast<int32_t>((v >> 61) & 1);    // "b"
  const int32_t smear = static_cast<int32_t>((v >> 54) & 0xff); // bits 54-61: b replicated
  const int32_t slice = static_cast<int32_t>((v >> 48) & 0x3f); // "cdefgh"
  if (bit_b != 0) {
    if ((not_b != 0) || (smear != 0xff)) {
      return -1;
    }
  } else {
    if ((not_b != 1) || (smear != 0)) {
      return -1;
    }
  }
  return (sign << 7) | (bit_b << 6) | slice;
}
70
71LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
72  DCHECK(RegStorage::IsSingle(r_dest));
73  if (value == 0) {
74    // TODO: we need better info about the target CPU.  a vector exclusive or
75    //       would probably be better here if we could rely on its existance.
76    // Load an immediate +2.0 (which encodes to 0)
77    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
78    // +0.0 = +2.0 - +2.0
79    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
80  } else {
81    int encoded_imm = EncodeImmSingle(value);
82    if (encoded_imm >= 0) {
83      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
84    }
85  }
86  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
87  if (data_target == NULL) {
88    data_target = AddWordData(&literal_list_, value);
89  }
90  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
91                          r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
92  SetMemRefType(load_pc_rel, true, kLiteral);
93  AppendLIR(load_pc_rel);
94  return load_pc_rel;
95}
96
/*
 * Count the number of leading zero bits in val.  Returns 32 for val == 0.
 */
static int LeadingZeros(uint32_t val) {
  int zeros = 0;
  // Walk a single-bit probe down from bit 31 until it hits a set bit.
  for (uint32_t probe = 0x80000000u; probe != 0 && (val & probe) == 0; probe >>= 1) {
    ++zeros;
  }
  return zeros;
}
114
/*
 * Determine whether value can be encoded as a Thumb2 modified
 * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
 */
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
  int32_t z_leading;
  int32_t z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh
  if (value == ((b0 << 16) | b0))
    return (0x1 << 8) | b0; /* 0:001:a:bcdefgh - 0x00XY00XY pattern */
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 8) | b0; /* 0:011:a:bcdefgh - 0xXYXYXYXY pattern */
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 8) | b0; /* 0:010:a:bcdefgh - 0xXY00XY00 pattern */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  // ~value & (value - 1) sets exactly the trailing-zero positions of value,
  // so 32 minus its leading-zero count is the trailing-zero count.
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return -1;  /* No - bail */
  /* left-justify the constant, discarding msb (known to be 1) */
  value <<= z_leading + 1;
  /* Create bcdefgh */
  value >>= 25;
  /* Put it all together */
  return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}
147
148bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
149  return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
150}
151
152bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
153  return EncodeImmSingle(value) >= 0;
154}
155
156bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) {
157  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
158}
159
160bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) {
161  return EncodeImmDouble(value) >= 0;
162}
163
164/*
165 * Load a immediate using a shortcut if possible; otherwise
166 * grab from the per-translation literal pool.
167 *
168 * No additional register clobbering operation performed. Use this version when
169 * 1) r_dest is freshly returned from AllocTemp or
170 * 2) The codegen is under fixed register usage
171 */
172LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
173  LIR* res;
174  int mod_imm;
175
176  if (r_dest.IsFloat()) {
177    return LoadFPConstantValue(r_dest.GetReg(), value);
178  }
179
180  /* See if the value can be constructed cheaply */
181  if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
182    return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
183  }
184  /* Check Modified immediate special cases */
185  mod_imm = ModifiedImmediate(value);
186  if (mod_imm >= 0) {
187    res = NewLIR2(kThumb2MovI8M, r_dest.GetReg(), mod_imm);
188    return res;
189  }
190  mod_imm = ModifiedImmediate(~value);
191  if (mod_imm >= 0) {
192    res = NewLIR2(kThumb2MvnI8M, r_dest.GetReg(), mod_imm);
193    return res;
194  }
195  /* 16-bit immediate? */
196  if ((value & 0xffff) == value) {
197    res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), value);
198    return res;
199  }
200  /* Do a low/high pair */
201  res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), Low16Bits(value));
202  NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), High16Bits(value));
203  return res;
204}
205
206LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
207  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched  during assembly */);
208  res->target = target;
209  return res;
210}
211
212LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
213  // This is kThumb2BCond instead of kThumbBCond for performance reasons. The assembly
214  // time required for a new pass after kThumbBCond is fixed up to kThumb2BCond is
215  // substantial.
216  LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
217                        ArmConditionEncoding(cc));
218  branch->target = target;
219  return branch;
220}
221
222LIR* ArmMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
223  ArmOpcode opcode = kThumbBkpt;
224  switch (op) {
225    case kOpBlx:
226      opcode = kThumbBlxR;
227      break;
228    case kOpBx:
229      opcode = kThumbBx;
230      break;
231    default:
232      LOG(FATAL) << "Bad opcode " << op;
233  }
234  return NewLIR1(opcode, r_dest_src.GetReg());
235}
236
/*
 * Emit a two-operand register instruction (r_dest_src1 OP= r_src2, with an
 * optional kFmtShift-encoded shift applied to r_src2).  A 16-bit Thumb
 * encoding is used when there is no shift and both operands are low
 * registers; otherwise the 32-bit Thumb2 form is chosen.  The final dispatch
 * consults the selected opcode's encoding flags to emit the correct LIR
 * operand count.
 */
LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
                               int shift) {
  // 16-bit Thumb forms exist only for unshifted, low-register operands.
  bool thumb_form =
      ((shift == 0) && r_dest_src1.Low8() && r_src2.Low8());
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdc:
      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
      break;
    case kOpCmn:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
      break;
    case kOpCmp:
      // 16-bit CMP has distinct encodings per low/high register combination.
      if (thumb_form)
        opcode = kThumbCmpRR;
      else if ((shift == 0) && !r_dest_src1.Low8() && !r_src2.Low8())
        opcode = kThumbCmpHH;
      else if ((shift == 0) && r_dest_src1.Low8())
        opcode = kThumbCmpLH;
      else if (shift == 0)
        opcode = kThumbCmpHL;
      else
        opcode = kThumb2CmpRR;
      break;
    case kOpXor:
      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
      break;
    case kOpMov:
      DCHECK_EQ(shift, 0);
      // 16-bit MOV also has distinct low/high register encodings.
      if (r_dest_src1.Low8() && r_src2.Low8())
        opcode = kThumbMovRR;
      else if (!r_dest_src1.Low8() && !r_src2.Low8())
        opcode = kThumbMovRR_H2H;
      else if (r_dest_src1.Low8())
        opcode = kThumbMovRR_H2L;
      else
        opcode = kThumbMovRR_L2H;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
      break;
    case kOpMvn:
      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
      break;
    case kOpNeg:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
      break;
    case kOpOr:
      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
      break;
    case kOpTst:
      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
      break;
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      if (!thumb_form) {
        // Binary, but rm is encoded twice.
        return NewLIR3(kThumb2RevRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
      }
      opcode = kThumbRev;
      break;
    case kOpRevsh:
      DCHECK_EQ(shift, 0);
      if (!thumb_form) {
        // Binary, but rm is encoded twice.
        return NewLIR3(kThumb2RevshRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
      }
      opcode = kThumbRevsh;
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, 0);
      // Sign-extend the low 8 bits via SBFX.
      return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 8);
    case kOp2Short:
      DCHECK_EQ(shift, 0);
      // Sign-extend the low 16 bits via SBFX.
      return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
    case kOp2Char:
      DCHECK_EQ(shift, 0);
      // Zero-extend the low 16 bits via UBFX.
      return NewLIR4(kThumb2Ubfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
      // Third operand is the shift descriptor.
      return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
    } else {
      // Three-register form: the destination doubles as the first source.
      return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg());
    }
  } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    LOG(FATAL) << "Unexpected encoding operand count";
    return NULL;
  }
}
369
// Two-register op with no shift applied to the second operand.
LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
373
// Specialized register<-memory move; not implemented for ARM (aborts).
LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}
378
// Specialized memory<-register move; not implemented for ARM (aborts).
LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}
383
384LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
385  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
386  return NULL;
387}
388
/*
 * Emit a three-register ALU operation (r_dest = r_src1 OP (r_src2 shifted)).
 * Only add/sub have 16-bit Thumb three-register encodings, and only when all
 * operands are low registers and no shift is requested; everything else uses
 * the 32-bit Thumb2 form.
 */
LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                  RegStorage r_src2, int shift) {
  ArmOpcode opcode = kThumbBkpt;
  // 16-bit forms require all-low registers and no shift.
  bool thumb_form = (shift == 0) && r_dest.Low8() && r_src1.Low8() && r_src2.Low8();
  switch (op) {
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = kThumb2BicRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2MulRRR;
      break;
    case kOpDiv:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2SdivRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2RorRRR;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK(!IsPseudoLirOp(opcode));
  // Quad-op encodings carry the shift descriptor as a fourth operand.
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}
457
// Three-register op with no shift applied to the second source operand.
LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
}
461
/*
 * Emit r_dest = r_src1 OP value.  Tries, in order: 16-bit Thumb immediate
 * forms, Thumb2 modified-immediate forms (possibly with a negated value and
 * swapped add/sub), the 12-bit add/sub immediate forms, and finally falls
 * back to materializing the constant in a temp register and using the
 * register-register form (alt_opcode).
 */
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR* res;
  bool neg = (value < 0);
  int32_t abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kThumbBkpt;
  ArmOpcode alt_opcode = kThumbBkpt;  // Register-form fallback used at the end.
  bool all_low_regs = r_dest.Low8() && r_src1.Low8();
  int32_t mod_imm = ModifiedImmediate(value);

  switch (op) {
    case kOpLsl:
      // Shift-by-immediate has dedicated 5-bit-immediate encodings.
      if (all_low_regs)
        return NewLIR3(kThumbLslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
      else
        return NewLIR3(kThumb2LslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpLsr:
      if (all_low_regs)
        return NewLIR3(kThumbLsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
      else
        return NewLIR3(kThumb2LsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAsr:
      if (all_low_regs)
        return NewLIR3(kThumbAsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
      else
        return NewLIR3(kThumb2AsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpRor:
      // No 16-bit rotate-by-immediate exists.
      return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAdd:
      // SP- and PC-relative adds have special 16-bit encodings (word-scaled).
      if (r_dest.Low8() && (r_src1 == rs_r13sp) && (value <= 1020) && ((value & 0x3) == 0)) {
        return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
      } else if (r_dest.Low8() && (r_src1 == rs_r15pc) &&
          (value <= 1020) && ((value & 0x3) == 0)) {
        return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
      }
      // Note: intentional fallthrough
    case kOpSub:
      // 3-bit immediate form, flipping add/sub for negative values.
      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
        if (op == kOpAdd)
          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
        else
          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
      }
      // If the value is not encodable, try the negated value with the
      // opposite operation (mod_imm is reused by the common tail below).
      if (mod_imm < 0) {
        mod_imm = ModifiedImmediate(-value);
        if (mod_imm >= 0) {
          op = (op == kOpAdd) ? kOpSub : kOpAdd;
        }
      }
      if (mod_imm < 0 && (abs_value & 0x3ff) == abs_value) {
        // This is deliberately used only if modified immediate encoding is inadequate since
        // we sometimes actually use the flags for small values but not necessarily low regs.
        // NOTE(review): mask is 0x3ff (10 bits) although the I12 form allows 12
        // bits - presumably conservative; confirm against the encoding table.
        if (op == kOpAdd)
          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
        else
          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
      }
      if (op == kOpSub) {
        opcode = kThumb2SubRRI8M;
        alt_opcode = kThumb2SubRRR;
      } else {
        opcode = kThumb2AddRRI8M;
        alt_opcode = kThumb2AddRRR;
      }
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRI8M;
      alt_opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRI8M;
      alt_opcode = kThumb2AdcRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRI8M;
      alt_opcode = kThumb2SbcRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRI8M;
      alt_opcode = kThumb2OrrRRR;
      break;
    case kOpAnd:
      // AND with an unencodable immediate may still be a BIC of the complement.
      if (mod_imm < 0) {
        mod_imm = ModifiedImmediate(~value);
        if (mod_imm >= 0) {
          return NewLIR3(kThumb2BicRRI8M, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
        }
      }
      opcode = kThumb2AndRRI8M;
      alt_opcode = kThumb2AndRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRI8M;
      alt_opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      // TUNING: power of 2, shift & add
      // No multiply-by-immediate: force the register-form fallback.
      mod_imm = -1;
      alt_opcode = kThumb2MulRRR;
      break;
    case kOpCmp: {
      LIR* res;  // Shadows the outer 'res'; this case always returns directly.
      if (mod_imm >= 0) {
        res = NewLIR2(kThumb2CmpRI8M, r_src1.GetReg(), mod_imm);
      } else {
        // CMP with an unencodable immediate may still be a CMN of the negation.
        mod_imm = ModifiedImmediate(-value);
        if (mod_imm >= 0) {
          res = NewLIR2(kThumb2CmnRI8M, r_src1.GetReg(), mod_imm);
        } else {
          RegStorage r_tmp = AllocTemp();
          res = LoadConstant(r_tmp, value);
          OpRegReg(kOpCmp, r_src1, r_tmp);
          FreeTemp(r_tmp);
        }
      }
      return res;
    }
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  // Common tail: modified-immediate form if encodable, else materialize the
  // constant into a scratch register and use the register form.
  if (mod_imm >= 0) {
    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
  } else {
    RegStorage r_scratch = AllocTemp();
    LoadConstant(r_scratch, value);
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
    else
      res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    FreeTemp(r_scratch);
    return res;
  }
}
597
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  bool neg = (value < 0);
  int32_t abs_value = (neg) ? -value : value;
  // 8-bit immediate forms need a low register; negative values flip add/sub.
  bool short_form = (((abs_value & 0xff) == abs_value) && r_dest_src1.Low8());
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdd:
      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
        // SP adjustment uses a word-scaled 7-bit immediate.
        DCHECK_EQ((value & 0x3), 0);
        return NewLIR1(kThumbAddSpI7, value >> 2);
      } else if (short_form) {
        opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
      }
      break;
    case kOpSub:
      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
        DCHECK_EQ((value & 0x3), 0);
        return NewLIR1(kThumbSubSpI7, value >> 2);
      } else if (short_form) {
        opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
      }
      break;
    case kOpCmp:
      // 16-bit CMP immediate only takes non-negative 8-bit values.
      if (!neg && short_form) {
        opcode = kThumbCmpRI8;
      } else {
        short_form = false;
      }
      break;
    default:
      /* Punt to OpRegRegImm - if bad case catch it there */
      short_form = false;
      break;
  }
  if (short_form) {
    return NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
  } else {
    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
  }
}
639
/*
 * Load a 64-bit constant into r_dest (either a double-precision FP register
 * or a core register pair).  Tries the VMOV.F64 immediate (with a special
 * case for +0.0) or per-half cheap integer constants first, then falls back
 * to a PC-relative load from the literal pool.
 */
LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  LIR* res = NULL;
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  if (r_dest.IsFloat()) {
    DCHECK(!r_dest.IsPair());
    if ((val_lo == 0) && (val_hi == 0)) {
      // TODO: we need better info about the target CPU.  a vector exclusive or
      //       would probably be better here if we could rely on its existance.
      // Load an immediate +2.0 (which encodes to 0)
      NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), 0);
      // +0.0 = +2.0 - +2.0
      res = NewLIR3(kThumb2Vsubd, r_dest.GetReg(), r_dest.GetReg(), r_dest.GetReg());
    } else {
      int encoded_imm = EncodeImmDouble(value);
      if (encoded_imm >= 0) {
        res = NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), encoded_imm);
      }
    }
  } else {
    // NOTE: Arm32 assumption here.
    DCHECK(r_dest.IsPair());
    // Build each half separately when both halves are cheap to materialize.
    if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
    }
  }
  if (res == NULL) {
    // No short form - load from the literal pool.
    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
    if (data_target == NULL) {
      data_target = AddWideData(&literal_list_, val_lo, val_hi);
    }
    if (r_dest.IsFloat()) {
      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                   r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
    } else {
      DCHECK(r_dest.IsPair());
      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
                   r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
    }
    SetMemRefType(res, true, kLiteral);
    AppendLIR(res);
  }
  return res;
}
686
687int ArmMir2Lir::EncodeShift(int code, int amount) {
688  return ((amount & 0x1f) << 2) | code;
689}
690
/*
 * Load from base + (index << scale) into r_dest.  FP destinations have no
 * scaled-index load form, so the effective address is first computed into a
 * temp register; core-register loads use register-offset encodings directly
 * (16-bit Thumb when all registers are low and scale is 0).
 */
LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_dest.Low8();
  LIR* load;
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (all_low_regs && (scale == 0));
  RegStorage reg_ptr;

  if (r_dest.IsFloat()) {
    if (r_dest.IsSingle()) {
      DCHECK((size == k32) || (size == kSingle) || (size == kReference));
      opcode = kThumb2Vldrs;
      size = kSingle;
    } else {
      DCHECK(r_dest.IsDouble());
      DCHECK((size == k64) || (size == kDouble));
      opcode = kThumb2Vldrd;
      size = kDouble;
    }
  } else {
    if (size == kSingle)
      size = k32;
  }

  switch (size) {
    case kDouble:  // fall-through
    // Intentional fall-though.
    case kSingle:
      // FP path: compute base + (index << scale) into a temp, then do a
      // zero-offset VLDR.
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
      }
      load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
      FreeTemp(reg_ptr);
      return load;
    case k32:
    // Intentional fall-though.
    case kReference:
      opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
      break;
    case kUnsignedHalf:
      opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
      break;
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
      break;
    case kUnsignedByte:
      opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
      break;
    case kSignedByte:
      opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  // 16-bit form has an implicit scale of 0; Thumb2 takes it as an operand.
  if (thumb_form)
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  else
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);

  return load;
}
756
/*
 * Store r_src to base + (index << scale).  Mirrors LoadBaseIndexed: FP
 * sources compute the address into a temp first (no scaled-index VSTR);
 * core-register stores use register-offset encodings directly.
 */
LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (all_low_regs && (scale == 0));
  RegStorage reg_ptr;

  if (r_src.IsFloat()) {
    if (r_src.IsSingle()) {
      DCHECK((size == k32) || (size == kSingle) || (size == kReference));
      opcode = kThumb2Vstrs;
      size = kSingle;
    } else {
      DCHECK(r_src.IsDouble());
      DCHECK((size == k64) || (size == kDouble));
      DCHECK_EQ((r_src.GetReg() & 0x1), 0);
      opcode = kThumb2Vstrd;
      size = kDouble;
    }
  } else {
    if (size == kSingle)
      size = k32;
  }

  switch (size) {
    case kDouble:  // fall-through
    // Intentional fall-though.
    case kSingle:
      // FP path: compute base + (index << scale) into a temp, then do a
      // zero-offset VSTR.
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
      }
      store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
      FreeTemp(reg_ptr);
      return store;
    case k32:
    // Intentional fall-though.
    case kReference:
      opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
      break;
    case kUnsignedHalf:
    // Intentional fall-though.
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
      break;
    case kUnsignedByte:
    // Intentional fall-though.
    case kSignedByte:
      opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  // 16-bit form has an implicit scale of 0; Thumb2 takes it as an operand.
  if (thumb_form)
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  else
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);

  return store;
}
821
822/*
823 * Load value from base + displacement.  Optionally perform null check
824 * on base (which must have an associated s_reg and MIR).  If not
825 * performing null check, incoming MIR can be null.
826 */
827LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
828                                  OpSize size) {
829  LIR* load = NULL;
830  ArmOpcode opcode = kThumbBkpt;
831  bool short_form = false;
832  bool thumb2Form = (displacement < 4092 && displacement >= 0);
833  bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
834  int encoded_disp = displacement;
835  bool already_generated = false;
836  switch (size) {
837    case kDouble:
838    // Intentional fall-though.
839    case k64: {
840      DCHECK_EQ(displacement & 3, 0);
841      encoded_disp = (displacement & 1020) >> 2;  // Within range of kThumb2Vldrd/kThumb2LdrdI8.
842      RegStorage r_ptr = r_base;
843      if ((displacement & ~1020) != 0) {
844        // For core register load, use the r_dest.GetLow() for the temporary pointer.
845        r_ptr = r_dest.IsFloat() ? AllocTemp() : r_dest.GetLow();
846        // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB.
847        OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020);
848      }
849      if (r_dest.IsFloat()) {
850        DCHECK(!r_dest.IsPair());
851        load = NewLIR3(kThumb2Vldrd, r_dest.GetReg(), r_ptr.GetReg(), encoded_disp);
852      } else {
853        load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(),
854                       encoded_disp);
855      }
856      if ((displacement & ~1020) != 0 && !r_dest.IsFloat()) {
857        FreeTemp(r_ptr);
858      }
859      already_generated = true;
860      break;
861    }
862    case kSingle:
863    // Intentional fall-though.
864    case k32:
865    // Intentional fall-though.
866    case kReference:
867      if (r_dest.IsFloat()) {
868        opcode = kThumb2Vldrs;
869        if (displacement <= 1020) {
870          short_form = true;
871          encoded_disp >>= 2;
872        }
873        break;
874      }
875      if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
876          (displacement >= 0)) {
877        short_form = true;
878        encoded_disp >>= 2;
879        opcode = kThumbLdrPcRel;
880      } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
881                 (displacement >= 0)) {
882        short_form = true;
883        encoded_disp >>= 2;
884        opcode = kThumbLdrSpRel;
885      } else if (all_low && displacement < 128 && displacement >= 0) {
886        DCHECK_EQ((displacement & 0x3), 0);
887        short_form = true;
888        encoded_disp >>= 2;
889        opcode = kThumbLdrRRI5;
890      } else if (thumb2Form) {
891        short_form = true;
892        opcode = kThumb2LdrRRI12;
893      }
894      break;
895    case kUnsignedHalf:
896      if (all_low && displacement < 64 && displacement >= 0) {
897        DCHECK_EQ((displacement & 0x1), 0);
898        short_form = true;
899        encoded_disp >>= 1;
900        opcode = kThumbLdrhRRI5;
901      } else if (displacement < 4092 && displacement >= 0) {
902        short_form = true;
903        opcode = kThumb2LdrhRRI12;
904      }
905      break;
906    case kSignedHalf:
907      if (thumb2Form) {
908        short_form = true;
909        opcode = kThumb2LdrshRRI12;
910      }
911      break;
912    case kUnsignedByte:
913      if (all_low && displacement < 32 && displacement >= 0) {
914        short_form = true;
915        opcode = kThumbLdrbRRI5;
916      } else if (thumb2Form) {
917        short_form = true;
918        opcode = kThumb2LdrbRRI12;
919      }
920      break;
921    case kSignedByte:
922      if (thumb2Form) {
923        short_form = true;
924        opcode = kThumb2LdrsbRRI12;
925      }
926      break;
927    default:
928      LOG(FATAL) << "Bad size: " << size;
929  }
930
931  if (!already_generated) {
932    if (short_form) {
933      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
934    } else {
935      RegStorage reg_offset = AllocTemp();
936      LoadConstant(reg_offset, encoded_disp);
937      if (r_dest.IsFloat()) {
938        // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
939        OpRegReg(kOpAdd, reg_offset, r_base);
940        load = LoadBaseDispBody(reg_offset, 0, r_dest, size);
941      } else {
942        load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
943      }
944      FreeTemp(reg_offset);
945    }
946  }
947
948  // TODO: in future may need to differentiate Dalvik accesses w/ spills
949  if (r_base == rs_rARM_SP) {
950    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
951  }
952  return load;
953}
954
955LIR* ArmMir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
956                                      OpSize size) {
957  // Only 64-bit load needs special handling.
958  if (UNLIKELY(size == k64 || size == kDouble)) {
959    DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadSave().
960    // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
961    if (!cu_->compiler_driver->GetInstructionSetFeatures().HasLpae()) {
962      // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
963      RegStorage r_ptr = AllocTemp();
964      OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
965      LIR* lir = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
966      FreeTemp(r_ptr);
967      return lir;
968    }
969  }
970  return LoadBaseDisp(r_base, displacement, r_dest, size);
971}
972
973LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
974                              OpSize size) {
975  // TODO: base this on target.
976  if (size == kWord) {
977    size = k32;
978  }
979  return LoadBaseDispBody(r_base, displacement, r_dest, size);
980}
981
982
LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                   OpSize size) {
  // Stores r_src to [r_base + displacement], selecting the shortest
  // Thumb/Thumb2 encoding whose immediate field can express the displacement
  // for the given operand size. Falls back to materializing the offset in a
  // temp register when no single-instruction form fits. Returns the store LIR
  // (used below for Dalvik register-access annotation on SP-relative stores).
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;  // Sentinel: replaced once an encoding is chosen.
  bool short_form = false;        // True when one reg+imm store instruction suffices.
  bool thumb2Form = (displacement < 4092 && displacement >= 0);  // Fits Thumb2 imm12.
  bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();  // 16-bit Thumb1 candidate.
  int encoded_disp = displacement;  // Scaled below to match the chosen opcode's imm field.
  bool already_generated = false;   // Set by cases that emit their LIR directly.
  switch (size) {
    case kDouble:
    // Intentional fall-through.
    case k64: {
      DCHECK_EQ(displacement & 3, 0);
      encoded_disp = (displacement & 1020) >> 2;  // Within range of kThumb2Vstrd/kThumb2StrdI8.
      RegStorage r_ptr = r_base;
      if ((displacement & ~1020) != 0) {
        // Displacement exceeds the scaled 8-bit immediate; fold the excess into
        // a temporary base pointer.
        r_ptr = AllocTemp();
        // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB.
        OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020);
      }
      if (r_src.IsFloat()) {
        DCHECK(!r_src.IsPair());
        store = NewLIR3(kThumb2Vstrd, r_src.GetReg(), r_ptr.GetReg(), encoded_disp);
      } else {
        store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_ptr.GetReg(),
                        encoded_disp);
      }
      if ((displacement & ~1020) != 0) {
        FreeTemp(r_ptr);
      }
      already_generated = true;
      break;
    }
    case kSingle:
    // Intentional fall-through.
    case k32:
    // Intentional fall-through.
    case kReference:
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsSingle());
        opcode = kThumb2Vstrs;
        if (displacement <= 1020) {  // vstrs immediate is 8 bits, scaled by 4.
          short_form = true;
          encoded_disp >>= 2;
        }
        break;
      }
      // NOTE(review): rs_r13sp here vs. rs_rARM_SP used elsewhere in this file --
      // presumably aliases of the same physical register; confirm in arm_lir.h.
      if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrSpRel;
      } else if (all_low && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);
        short_form = true;
        encoded_disp >>= 2;  // str imm5 is scaled by 4.
        opcode = kThumbStrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrRRI12;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      if (all_low && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);
        short_form = true;
        encoded_disp >>= 1;  // strh imm5 is scaled by 2.
        opcode = kThumbStrhRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrhRRI12;
      }
      break;
    case kUnsignedByte:
    case kSignedByte:
      if (all_low && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbStrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (!already_generated) {
    if (short_form) {
      store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
    } else {
      // No single-instruction encoding fits: materialize the offset in a temp,
      // then use an indexed store, or (FP only) fold into a direct pointer.
      RegStorage r_scratch = AllocTemp();
      LoadConstant(r_scratch, encoded_disp);
      if (r_src.IsFloat()) {
        // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
        OpRegReg(kOpAdd, r_scratch, r_base);
        store = StoreBaseDispBody(r_scratch, 0, r_src, size);
      } else {
        store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
      }
      FreeTemp(r_scratch);
    }
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses
  if (r_base == rs_rARM_SP) {
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
  }
  return store;
}
1093
LIR* ArmMir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
                                       OpSize size) {
  // Volatile store; only the 64-bit case needs an explicitly atomic sequence.
  if (UNLIKELY(size == k64 || size == kDouble)) {
    DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadSave().
    // If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
    if (!cu_->compiler_driver->GetInstructionSetFeatures().HasLpae()) {
      // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
      RegStorage r_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
      // STREXD requires a prior LDREXD to claim the exclusive monitor; the
      // label below is the retry target when STREXD reports failure.
      LIR* fail_target = NewLIR0(kPseudoTargetLabel);
      // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
      // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
      // in LDREXD and recalculate it from r_base.
      RegStorage r_temp = AllocTemp();
      RegStorage r_temp_high = AllocFreeTemp();  // We may not have another temp.
      if (r_temp_high.Valid()) {
        NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
        FreeTemp(r_temp_high);
        FreeTemp(r_temp);
      } else {
        // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
        NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
        FreeTemp(r_temp);  // May need the temp for kOpAdd.
        OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
      }
      // NOTE: r_temp was freed above but its register number is deliberately
      // reused here as the STREXD status output (0 == success) -- it is only
      // written by STREXD, then compared against 0 for the retry branch.
      LIR* lir = NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(),
                         r_ptr.GetReg());
      OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
      FreeTemp(r_ptr);
      return lir;
    }
  }
  return StoreBaseDisp(r_base, displacement, r_src, size);
}
1129
1130LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
1131                               OpSize size) {
1132  // TODO: base this on target.
1133  if (size == kWord) {
1134    size = k32;
1135  }
1136  return StoreBaseDispBody(r_base, displacement, r_src, size);
1137}
1138
1139LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
1140  int opcode;
1141  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
1142  if (r_dest.IsDouble()) {
1143    opcode = kThumb2Vmovd;
1144  } else {
1145    if (r_dest.IsSingle()) {
1146      opcode = r_src.IsSingle() ? kThumb2Vmovs : kThumb2Fmsr;
1147    } else {
1148      DCHECK(r_src.IsSingle());
1149      opcode = kThumb2Fmrs;
1150    }
1151  }
1152  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
1153  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
1154    res->flags.is_nop = true;
1155  }
1156  return res;
1157}
1158
1159LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
1160  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
1161  return NULL;
1162}
1163
LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  // 64-bit thread offsets are not applicable to the 32-bit ARM backend.
  UNIMPLEMENTED(FATAL) << "Should not be called.";
  return nullptr;
}
1168
1169LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
1170  LOG(FATAL) << "Unexpected use of OpMem for Arm";
1171  return NULL;
1172}
1173
1174LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
1175                                      int displacement, RegStorage r_src, OpSize size) {
1176  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
1177  return NULL;
1178}
1179
1180LIR* ArmMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
1181  LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
1182  return NULL;
1183}
1184
1185LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
1186                                     int displacement, RegStorage r_dest, OpSize size) {
1187  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
1188  return NULL;
1189}
1190
1191}  // namespace art
1192