utility_arm.cc revision 7020278bce98a0735dc6abcbd33bdf1ed2634f1d
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "arm_lir.h"
18#include "codegen_arm.h"
19#include "dex/quick/mir_to_lir-inl.h"
20
21namespace art {
22
23/* This file contains codegen for the Thumb ISA. */
24
/*
 * Try to encode the raw bit pattern of a 32-bit float as a VFP
 * "modified immediate" (abcdefgh form).  Returns the 8-bit encoding
 * a:b:cdefgh on success, or -1 if the value is not encodable.
 */
static int32_t EncodeImmSingle(int32_t value) {
  uint32_t bits = static_cast<uint32_t>(value);
  // The low 19 mantissa bits must all be zero.
  if ((bits & 0x0007ffffu) != 0)
    return -1;
  uint32_t bit_a     = (bits >> 31) & 0x1;   // Sign.
  uint32_t not_bit_b = (bits >> 30) & 0x1;   // Must be ~bit_b.
  uint32_t bit_b     = (bits >> 29) & 0x1;
  uint32_t b_smear   = (bits >> 25) & 0x1f;  // Bits 29..25: five copies of b.
  uint32_t slice     = (bits >> 19) & 0x3f;  // cdefgh payload.
  // Exponent must be of the restricted form bit 30 == NOT b, bits 29..25 == bbbbb.
  if (bit_b != 0) {
    if ((not_bit_b != 0) || (b_smear != 0x1f))
      return -1;
  } else {
    if ((not_bit_b != 1) || (b_smear != 0x0))
      return -1;
  }
  return static_cast<int32_t>((bit_a << 7) | (bit_b << 6) | slice);
}
45
/*
 * Determine whether value can be encoded as a Thumb2 floating point
 * immediate.  If not, return -1.  If so return encoded 8-bit value.
 */
static int32_t EncodeImmDouble(int64_t value) {
  uint64_t bits = static_cast<uint64_t>(value);
  // The low 48 mantissa bits must all be zero.
  if ((bits & 0x0000ffffffffffffull) != 0)
    return -1;
  int32_t bit_a     = static_cast<int32_t>((bits >> 63) & 0x1);   // Sign.
  int32_t not_bit_b = static_cast<int32_t>((bits >> 62) & 0x1);   // Must be ~bit_b.
  int32_t bit_b     = static_cast<int32_t>((bits >> 61) & 0x1);
  int32_t b_smear   = static_cast<int32_t>((bits >> 54) & 0xff);  // Bits 61..54: eight copies of b.
  int32_t slice     = static_cast<int32_t>((bits >> 48) & 0x3f);  // cdefgh payload.
  // Exponent must be bit 62 == NOT b and bits 61..54 == bbbbbbbb.
  if (bit_b != 0) {
    if ((not_bit_b != 0) || (b_smear != 0xff))
      return -1;
  } else {
    if ((not_bit_b != 1) || (b_smear != 0x0))
      return -1;
  }
  return (bit_a << 7) | (bit_b << 6) | slice;
}
70
71LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
72  DCHECK(ARM_SINGLEREG(r_dest));
73  if (value == 0) {
74    // TODO: we need better info about the target CPU.  a vector exclusive or
75    //       would probably be better here if we could rely on its existance.
76    // Load an immediate +2.0 (which encodes to 0)
77    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
78    // +0.0 = +2.0 - +2.0
79    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
80  } else {
81    int encoded_imm = EncodeImmSingle(value);
82    if (encoded_imm >= 0) {
83      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
84    }
85  }
86  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
87  if (data_target == NULL) {
88    data_target = AddWordData(&literal_list_, value);
89  }
90  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
91                          r_dest, r15pc, 0, 0, 0, data_target);
92  SetMemRefType(load_pc_rel, true, kLiteral);
93  AppendLIR(load_pc_rel);
94  return load_pc_rel;
95}
96
/*
 * Count leading zero bits of val via binary search on half-width chunks.
 * Returns 32 for val == 0.
 */
static int LeadingZeros(uint32_t val) {
  int32_t zeros = 32;
  // Probe progressively smaller top slices; keep the nonzero upper part.
  for (int32_t width = 16; width != 0; width >>= 1) {
    uint32_t upper = val >> width;
    if (upper != 0) {
      zeros -= width;
      val = upper;
    }
  }
  // At this point val is 0 or 1; subtract the surviving top bit.
  return zeros - val;
}
114
/*
 * Determine whether value can be encoded as a Thumb2 modified
 * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
 */
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
  int32_t z_leading;
  int32_t z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh
  // Byte replicated into halves: 0x00XY00XY.
  if (value == ((b0 << 16) | b0))
    return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
  // Byte replicated into all four lanes: 0xXYXYXYXY.
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
  b0 = (value >> 8) & 0xff;
  // Byte replicated into odd lanes: 0xXY00XY00.
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  // ~value & (value - 1) isolates the trailing zero bits of value.
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return -1;  /* No - bail */
  /* left-justify the constant, discarding msb (known to be 1) */
  value <<= z_leading + 1;
  /* Create bcdefgh */
  value >>= 25;
  /* Put it all together */
  return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}
147
148bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
149  return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
150}
151
152bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
153  return EncodeImmSingle(value) >= 0;
154}
155
156bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) {
157  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
158}
159
160bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) {
161  return EncodeImmDouble(value) >= 0;
162}
163
164/*
165 * Load a immediate using a shortcut if possible; otherwise
166 * grab from the per-translation literal pool.
167 *
168 * No additional register clobbering operation performed. Use this version when
169 * 1) r_dest is freshly returned from AllocTemp or
170 * 2) The codegen is under fixed register usage
171 */
172LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
173  LIR* res;
174  int mod_imm;
175
176  if (ARM_FPREG(r_dest)) {
177    return LoadFPConstantValue(r_dest, value);
178  }
179
180  /* See if the value can be constructed cheaply */
181  if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
182    return NewLIR2(kThumbMovImm, r_dest, value);
183  }
184  /* Check Modified immediate special cases */
185  mod_imm = ModifiedImmediate(value);
186  if (mod_imm >= 0) {
187    res = NewLIR2(kThumb2MovImmShift, r_dest, mod_imm);
188    return res;
189  }
190  mod_imm = ModifiedImmediate(~value);
191  if (mod_imm >= 0) {
192    res = NewLIR2(kThumb2MvnImm12, r_dest, mod_imm);
193    return res;
194  }
195  /* 16-bit immediate? */
196  if ((value & 0xffff) == value) {
197    res = NewLIR2(kThumb2MovImm16, r_dest, value);
198    return res;
199  }
200  /* Do a low/high pair */
201  res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
202  NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
203  return res;
204}
205
206LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
207  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched  during assembly*/);
208  res->target = target;
209  return res;
210}
211
212LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
213  LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
214                        ArmConditionEncoding(cc));
215  branch->target = target;
216  return branch;
217}
218
219LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) {
220  ArmOpcode opcode = kThumbBkpt;
221  switch (op) {
222    case kOpBlx:
223      opcode = kThumbBlxR;
224      break;
225    default:
226      LOG(FATAL) << "Bad opcode " << op;
227  }
228  return NewLIR1(opcode, r_dest_src);
229}
230
/*
 * Two-operand ALU op: r_dest_src1 <- r_dest_src1 op (r_src2 shifted).
 * Selects a 16-bit Thumb encoding when both registers are low and no
 * shift is requested, otherwise the Thumb2 form.  'shift' is a value
 * produced by EncodeShift().
 */
LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
                               int shift) {
  // 16-bit encodings exist only for low registers with no shift applied.
  bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdc:
      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
      break;
    case kOpCmn:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
      break;
    case kOpCmp:
      // cmp has distinct 16-bit encodings for each low/high register pairing.
      if (thumb_form)
        opcode = kThumbCmpRR;
      else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
        opcode = kThumbCmpHH;
      else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
        opcode = kThumbCmpLH;
      else if (shift == 0)
        opcode = kThumbCmpHL;
      else
        opcode = kThumb2CmpRR;
      break;
    case kOpXor:
      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
      break;
    case kOpMov:
      DCHECK_EQ(shift, 0);
      // mov likewise picks an encoding by the low/high register pairing.
      if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
        opcode = kThumbMovRR;
      else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
        opcode = kThumbMovRR_H2H;
      else if (ARM_LOWREG(r_dest_src1))
        opcode = kThumbMovRR_H2L;
      else
        opcode = kThumbMovRR_L2H;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
      break;
    case kOpMvn:
      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
      break;
    case kOpNeg:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
      break;
    case kOpOr:
      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
      break;
    case kOpTst:
      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
      break;
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      if (!thumb_form) {
        // Binary, but rm is encoded twice.
        return NewLIR3(kThumb2RevRR, r_dest_src1, r_src2, r_src2);
      }
      opcode = kThumbRev;
      break;
    case kOpRevsh:
      DCHECK_EQ(shift, 0);
      if (!thumb_form) {
        // Binary, but rm is encoded twice.
        return NewLIR3(kThumb2RevshRR, r_dest_src1, r_src2, r_src2);
      }
      opcode = kThumbRevsh;
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, 0);
      // Sign/zero extensions are expressed as bitfield extracts.
      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
    case kOp2Short:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
    case kOp2Char:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK(!IsPseudoLirOp(opcode));
  // Emit according to the operand count/format recorded in EncodingMap.
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    return NewLIR2(opcode, r_dest_src1, r_src2);
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
      return NewLIR3(opcode, r_dest_src1, r_src2, shift);
    } else {
      // Three-address form with the destination doubling as first source.
      return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
    }
  } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
  } else {
    LOG(FATAL) << "Unexpected encoding operand count";
    return NULL;
  }
}
362
363LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
364  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
365}
366
/*
 * Three-operand ALU op: r_dest <- r_src1 op (r_src2 shifted).  Only add
 * and sub have 16-bit three-register encodings; everything else (and any
 * shifted form) uses Thumb2.
 */
LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
                                  int r_src2, int shift) {
  ArmOpcode opcode = kThumbBkpt;
  // 16-bit forms need all low registers and no shift.
  bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
      ARM_LOWREG(r_src2);
  switch (op) {
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = kThumb2BicRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2MulRRR;
      break;
    case kOpDiv:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2SdivRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2RorRRR;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK(!IsPseudoLirOp(opcode));
  // Quad-op encodings carry the shift operand; tertiary ones do not.
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    return NewLIR3(opcode, r_dest, r_src1, r_src2);
  }
}
436
437LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
438  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
439}
440
441LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
442  LIR* res;
443  bool neg = (value < 0);
444  int32_t abs_value = (neg) ? -value : value;
445  ArmOpcode opcode = kThumbBkpt;
446  ArmOpcode alt_opcode = kThumbBkpt;
447  bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
448  int32_t mod_imm = ModifiedImmediate(value);
449  int32_t mod_imm_neg = ModifiedImmediate(-value);
450
451  switch (op) {
452    case kOpLsl:
453      if (all_low_regs)
454        return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
455      else
456        return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
457    case kOpLsr:
458      if (all_low_regs)
459        return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
460      else
461        return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
462    case kOpAsr:
463      if (all_low_regs)
464        return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
465      else
466        return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
467    case kOpRor:
468      return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
469    case kOpAdd:
470      if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
471        (value <= 1020) && ((value & 0x3) == 0)) {
472        return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
473      } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
474          (value <= 1020) && ((value & 0x3) == 0)) {
475        return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
476      }
477      // Note: intentional fallthrough
478    case kOpSub:
479      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
480        if (op == kOpAdd)
481          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
482        else
483          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
484        return NewLIR3(opcode, r_dest, r_src1, abs_value);
485      } else if ((abs_value & 0xff) == abs_value) {
486        if (op == kOpAdd)
487          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
488        else
489          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
490        return NewLIR3(opcode, r_dest, r_src1, abs_value);
491      }
492      if (mod_imm_neg >= 0) {
493        op = (op == kOpAdd) ? kOpSub : kOpAdd;
494        mod_imm = mod_imm_neg;
495      }
496      if (op == kOpSub) {
497        opcode = kThumb2SubRRI8;
498        alt_opcode = kThumb2SubRRR;
499      } else {
500        opcode = kThumb2AddRRI8;
501        alt_opcode = kThumb2AddRRR;
502      }
503      break;
504    case kOpRsub:
505      opcode = kThumb2RsubRRI8;
506      alt_opcode = kThumb2RsubRRR;
507      break;
508    case kOpAdc:
509      opcode = kThumb2AdcRRI8;
510      alt_opcode = kThumb2AdcRRR;
511      break;
512    case kOpSbc:
513      opcode = kThumb2SbcRRI8;
514      alt_opcode = kThumb2SbcRRR;
515      break;
516    case kOpOr:
517      opcode = kThumb2OrrRRI8;
518      alt_opcode = kThumb2OrrRRR;
519      break;
520    case kOpAnd:
521      opcode = kThumb2AndRRI8;
522      alt_opcode = kThumb2AndRRR;
523      break;
524    case kOpXor:
525      opcode = kThumb2EorRRI8;
526      alt_opcode = kThumb2EorRRR;
527      break;
528    case kOpMul:
529      // TUNING: power of 2, shift & add
530      mod_imm = -1;
531      alt_opcode = kThumb2MulRRR;
532      break;
533    case kOpCmp: {
534      int mod_imm = ModifiedImmediate(value);
535      LIR* res;
536      if (mod_imm >= 0) {
537        res = NewLIR2(kThumb2CmpRI12, r_src1, mod_imm);
538      } else {
539        int r_tmp = AllocTemp();
540        res = LoadConstant(r_tmp, value);
541        OpRegReg(kOpCmp, r_src1, r_tmp);
542        FreeTemp(r_tmp);
543      }
544      return res;
545    }
546    default:
547      LOG(FATAL) << "Bad opcode: " << op;
548  }
549
550  if (mod_imm >= 0) {
551    return NewLIR3(opcode, r_dest, r_src1, mod_imm);
552  } else {
553    int r_scratch = AllocTemp();
554    LoadConstant(r_scratch, value);
555    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
556      res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
557    else
558      res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
559    FreeTemp(r_scratch);
560    return res;
561  }
562}
563
564/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
565LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
566  bool neg = (value < 0);
567  int32_t abs_value = (neg) ? -value : value;
568  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
569  ArmOpcode opcode = kThumbBkpt;
570  switch (op) {
571    case kOpAdd:
572      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
573        DCHECK_EQ((value & 0x3), 0);
574        return NewLIR1(kThumbAddSpI7, value >> 2);
575      } else if (short_form) {
576        opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
577      }
578      break;
579    case kOpSub:
580      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
581        DCHECK_EQ((value & 0x3), 0);
582        return NewLIR1(kThumbSubSpI7, value >> 2);
583      } else if (short_form) {
584        opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
585      }
586      break;
587    case kOpCmp:
588      if (ARM_LOWREG(r_dest_src1) && short_form) {
589        opcode = (short_form) ?  kThumbCmpRI8 : kThumbCmpRR;
590      } else if (ARM_LOWREG(r_dest_src1)) {
591        opcode = kThumbCmpRR;
592      } else {
593        short_form = false;
594        opcode = kThumbCmpHL;
595      }
596      break;
597    default:
598      /* Punt to OpRegRegImm - if bad case catch it there */
599      short_form = false;
600      break;
601  }
602  if (short_form) {
603    return NewLIR2(opcode, r_dest_src1, abs_value);
604  } else {
605    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
606  }
607}
608
/*
 * Load a 64-bit constant into a core register pair or a double FP
 * register.  Tries cheap immediate forms first, then falls back to a
 * PC-relative load from the literal pool.
 */
LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  LIR* res = NULL;
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int target_reg = S2d(r_dest_lo, r_dest_hi);  // Only meaningful for FP pairs.
  if (ARM_FPREG(r_dest_lo)) {
    if ((val_lo == 0) && (val_hi == 0)) {
      // TODO: we need better info about the target CPU.  a vector exclusive or
      //       would probably be better here if we could rely on its existence.
      // Load an immediate +2.0 (which encodes to 0)
      NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
      // +0.0 = +2.0 - +2.0
      res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
    } else {
      // Direct vmovd immediate if the pattern encodes.
      int encoded_imm = EncodeImmDouble(value);
      if (encoded_imm >= 0) {
        res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
      }
    }
  } else {
    // Core pair: build each half independently when both halves are cheap.
    if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
      res = LoadConstantNoClobber(r_dest_lo, val_lo);
      LoadConstantNoClobber(r_dest_hi, val_hi);
    }
  }
  if (res == NULL) {
    // No short form - load from the literal pool.
    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
    if (data_target == NULL) {
      data_target = AddWideData(&literal_list_, val_lo, val_hi);
    }
    if (ARM_FPREG(r_dest_lo)) {
      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                   target_reg, r15pc, 0, 0, 0, data_target);
    } else {
      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
                   r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
    }
    SetMemRefType(res, true, kLiteral);
    AppendLIR(res);
  }
  return res;
}
652
653int ArmMir2Lir::EncodeShift(int code, int amount) {
654  return ((amount & 0x1f) << 2) | code;
655}
656
/*
 * Load a value of the given size from rBase + (r_index << scale) into
 * r_dest.  VFP loads have no scaled-index addressing mode, so for FP
 * destinations the effective address is first built in a temp register.
 */
LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
                                 int scale, OpSize size) {
  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
  LIR* load;
  ArmOpcode opcode = kThumbBkpt;
  // 16-bit indexed loads require low registers and no scaling.
  bool thumb_form = (all_low_regs && (scale == 0));
  int reg_ptr;

  if (ARM_FPREG(r_dest)) {
    if (ARM_SINGLEREG(r_dest)) {
      DCHECK((size == kWord) || (size == kSingle));
      opcode = kThumb2Vldrs;
      size = kSingle;
    } else {
      DCHECK(ARM_DOUBLEREG(r_dest));
      DCHECK((size == kLong) || (size == kDouble));
      DCHECK_EQ((r_dest & 0x1), 0);  // Double regs must be even-numbered.
      opcode = kThumb2Vldrd;
      size = kDouble;
    }
  } else {
    // Core registers treat kSingle as a plain word load.
    if (size == kSingle)
      size = kWord;
  }

  switch (size) {
    case kDouble:  // fall-through
    case kSingle:
      // No scaled-index VFP addressing: compute base + (index << scale) first.
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
      }
      load = NewLIR3(opcode, r_dest, reg_ptr, 0);
      FreeTemp(reg_ptr);
      return load;
    case kWord:
      opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
      break;
    case kUnsignedHalf:
      opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
      break;
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
      break;
    case kUnsignedByte:
      opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
      break;
    case kSignedByte:
      opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  // Thumb2 forms carry an explicit scale operand; 16-bit forms do not.
  if (thumb_form)
    load = NewLIR3(opcode, r_dest, rBase, r_index);
  else
    load = NewLIR4(opcode, r_dest, rBase, r_index, scale);

  return load;
}
720
/*
 * Store a value of the given size from r_src to rBase + (r_index << scale).
 * Mirrors LoadBaseIndexed: FP stores have no scaled-index form, so the
 * effective address is first built in a temp register.
 */
LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                  int scale, OpSize size) {
  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  // 16-bit indexed stores require low registers and no scaling.
  bool thumb_form = (all_low_regs && (scale == 0));
  int reg_ptr;

  if (ARM_FPREG(r_src)) {
    if (ARM_SINGLEREG(r_src)) {
      DCHECK((size == kWord) || (size == kSingle));
      opcode = kThumb2Vstrs;
      size = kSingle;
    } else {
      DCHECK(ARM_DOUBLEREG(r_src));
      DCHECK((size == kLong) || (size == kDouble));
      DCHECK_EQ((r_src & 0x1), 0);  // Double regs must be even-numbered.
      opcode = kThumb2Vstrd;
      size = kDouble;
    }
  } else {
    // Core registers treat kSingle as a plain word store.
    if (size == kSingle)
      size = kWord;
  }

  switch (size) {
    case kDouble:  // fall-through
    case kSingle:
      // No scaled-index VFP addressing: compute base + (index << scale) first.
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
      }
      store = NewLIR3(opcode, r_src, reg_ptr, 0);
      FreeTemp(reg_ptr);
      return store;
    case kWord:
      opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  // Thumb2 forms carry an explicit scale operand; 16-bit forms do not.
  if (thumb_form)
    store = NewLIR3(opcode, r_src, rBase, r_index);
  else
    store = NewLIR4(opcode, r_src, rBase, r_index, scale);

  return store;
}
780
781/*
782 * Load value from base + displacement.  Optionally perform null check
783 * on base (which must have an associated s_reg and MIR).  If not
784 * performing null check, incoming MIR can be null.
785 */
786LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
787                                  int r_dest_hi, OpSize size, int s_reg) {
788  LIR* load = NULL;
789  ArmOpcode opcode = kThumbBkpt;
790  bool short_form = false;
791  bool thumb2Form = (displacement < 4092 && displacement >= 0);
792  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
793  int encoded_disp = displacement;
794  bool is64bit = false;
795  bool already_generated = false;
796  switch (size) {
797    case kDouble:
798    case kLong:
799      is64bit = true;
800      if (ARM_FPREG(r_dest)) {
801        if (ARM_SINGLEREG(r_dest)) {
802          DCHECK(ARM_FPREG(r_dest_hi));
803          r_dest = S2d(r_dest, r_dest_hi);
804        }
805        opcode = kThumb2Vldrd;
806        if (displacement <= 1020) {
807          short_form = true;
808          encoded_disp >>= 2;
809        }
810        break;
811      } else {
812        if (displacement <= 1020) {
813          load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
814        } else {
815          load = LoadBaseDispBody(rBase, displacement, r_dest,
816                                 -1, kWord, s_reg);
817          LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
818                           -1, kWord, INVALID_SREG);
819        }
820        already_generated = true;
821      }
822    case kSingle:
823    case kWord:
824      if (ARM_FPREG(r_dest)) {
825        opcode = kThumb2Vldrs;
826        if (displacement <= 1020) {
827          short_form = true;
828          encoded_disp >>= 2;
829        }
830        break;
831      }
832      if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
833          (displacement <= 1020) && (displacement >= 0)) {
834        short_form = true;
835        encoded_disp >>= 2;
836        opcode = kThumbLdrPcRel;
837      } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
838          (displacement <= 1020) && (displacement >= 0)) {
839        short_form = true;
840        encoded_disp >>= 2;
841        opcode = kThumbLdrSpRel;
842      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
843        DCHECK_EQ((displacement & 0x3), 0);
844        short_form = true;
845        encoded_disp >>= 2;
846        opcode = kThumbLdrRRI5;
847      } else if (thumb2Form) {
848        short_form = true;
849        opcode = kThumb2LdrRRI12;
850      }
851      break;
852    case kUnsignedHalf:
853      if (all_low_regs && displacement < 64 && displacement >= 0) {
854        DCHECK_EQ((displacement & 0x1), 0);
855        short_form = true;
856        encoded_disp >>= 1;
857        opcode = kThumbLdrhRRI5;
858      } else if (displacement < 4092 && displacement >= 0) {
859        short_form = true;
860        opcode = kThumb2LdrhRRI12;
861      }
862      break;
863    case kSignedHalf:
864      if (thumb2Form) {
865        short_form = true;
866        opcode = kThumb2LdrshRRI12;
867      }
868      break;
869    case kUnsignedByte:
870      if (all_low_regs && displacement < 32 && displacement >= 0) {
871        short_form = true;
872        opcode = kThumbLdrbRRI5;
873      } else if (thumb2Form) {
874        short_form = true;
875        opcode = kThumb2LdrbRRI12;
876      }
877      break;
878    case kSignedByte:
879      if (thumb2Form) {
880        short_form = true;
881        opcode = kThumb2LdrsbRRI12;
882      }
883      break;
884    default:
885      LOG(FATAL) << "Bad size: " << size;
886  }
887
888  if (!already_generated) {
889    if (short_form) {
890      load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
891    } else {
892      int reg_offset = AllocTemp();
893      LoadConstant(reg_offset, encoded_disp);
894      load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
895      FreeTemp(reg_offset);
896    }
897  }
898
899  // TODO: in future may need to differentiate Dalvik accesses w/ spills
900  if (rBase == rARM_SP) {
901    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
902  }
903  return load;
904}
905
906LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
907                              OpSize size, int s_reg) {
908  return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
909}
910
911LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
912                                  int r_dest_hi, int s_reg) {
913  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
914}
915
916
/*
 * Store r_src (plus r_src_hi for 64-bit values) of the given size to
 * [rBase + displacement].  Selects the shortest available Thumb/Thumb2
 * encoding for the displacement; when no immediate form fits, the
 * displacement is materialized in a scratch register and a
 * register-offset store is emitted instead.  Stores through the Dalvik
 * frame pointer (rARM_SP) are annotated for the register promotion/spill
 * machinery.  Returns the LIR of the emitted store (the low-word store
 * when a 64-bit store is split into two word stores).
 */
LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
                                   int r_src, int r_src_hi, OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;  // Placeholder; a real opcode is chosen below.
  bool short_form = false;
  // Thumb2 imm12 store forms cover displacements in [0, 4092).
  bool thumb2Form = (displacement < 4092 && displacement >= 0);
  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
  int encoded_disp = displacement;  // May be scaled down to match the chosen encoding.
  bool is64bit = false;
  bool already_generated = false;  // Set by branches that emit the store themselves.
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (!ARM_FPREG(r_src)) {
        // Core register pair: STRD takes a word-scaled immediate (max 1020).
        // NOTE(review): only the upper bound is checked here, so a negative
        // displacement would fall into the STRD case — presumably callers
        // never pass one for 64-bit stores; verify.
        if (displacement <= 1020) {
          store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
        } else {
          // Out of STRD range: split into two 32-bit word stores.
          store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
          StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
        }
        already_generated = true;
      } else {
        // FP value: fuse a single-precision pair into one double reg if needed.
        if (ARM_SINGLEREG(r_src)) {
          DCHECK(ARM_FPREG(r_src_hi));
          r_src = S2d(r_src, r_src_hi);
        }
        opcode = kThumb2Vstrd;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;  // VSTR immediate is expressed in words.
        }
      }
      break;
    case kSingle:
    case kWord:
      if (ARM_FPREG(r_src)) {
        DCHECK(ARM_SINGLEREG(r_src));
        opcode = kThumb2Vstrs;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;  // VSTR immediate is expressed in words.
        }
        break;
      }
      // Prefer the short SP-relative form, then the low-reg imm5 form,
      // then the Thumb2 imm12 form.
      if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
          (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;  // SP-relative immediate is expressed in words.
        opcode = kThumbStrSpRel;
      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);  // imm5 form requires word alignment.
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrRRI12;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      if (all_low_regs && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);  // imm5 form requires halfword alignment.
        short_form = true;
        encoded_disp >>= 1;  // Halfword immediate is scaled by 2.
        opcode = kThumbStrhRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrhRRI12;
      }
      break;
    case kUnsignedByte:
    case kSignedByte:
      // Byte immediates are unscaled.
      if (all_low_regs && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbStrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (!already_generated) {
    if (short_form) {
      store = NewLIR3(opcode, r_src, rBase, encoded_disp);
    } else {
      // No immediate form fits: build the offset in a temp register and
      // emit a register-offset store instead.
      int r_scratch = AllocTemp();
      LoadConstant(r_scratch, encoded_disp);
      store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
      FreeTemp(r_scratch);
    }
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses
  if (rBase == rARM_SP) {
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
  }
  return store;
}
1019
/*
 * Store a value of the given (<= 32-bit) size from r_src to
 * [rBase + displacement].  Thin wrapper over StoreBaseDispBody with no
 * high register (-1).
 */
LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
                               OpSize size) {
  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
}
1024
/*
 * Store the 64-bit value held in the register pair r_src_lo/r_src_hi to
 * [rBase + displacement].  Thin wrapper over StoreBaseDispBody with
 * size kLong.
 */
LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                   int r_src_lo, int r_src_hi) {
  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
}
1029
1030LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) {
1031  int opcode;
1032  DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
1033  if (ARM_DOUBLEREG(r_dest)) {
1034    opcode = kThumb2Vmovd;
1035  } else {
1036    if (ARM_SINGLEREG(r_dest)) {
1037      opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
1038    } else {
1039      DCHECK(ARM_SINGLEREG(r_src));
1040      opcode = kThumb2Fmrs;
1041    }
1042  }
1043  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
1044  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
1045    res->flags.is_nop = true;
1046  }
1047  return res;
1048}
1049
// Thread-offset memory operands are not supported by the ARM backend;
// abort compilation loudly if this is ever reached.
LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
  return NULL;
}
1054
// Direct memory operands are not supported by the ARM backend; abort
// compilation loudly if this is ever reached.
LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm";
  return NULL;
}
1059
// Combined base+index+displacement stores are not supported by the ARM
// backend; abort compilation loudly if this is ever reached.
LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_src, int r_src_hi, OpSize size,
                                      int s_reg) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
  return NULL;
}
1066
// Register-memory ALU operations are not supported by the ARM backend;
// abort compilation loudly if this is ever reached.
LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
  return NULL;
}
1071
// Combined base+index+displacement loads are not supported by the ARM
// backend; abort compilation loudly if this is ever reached.
LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
  return NULL;
}
1078
1079}  // namespace art
1080