int_arm.cc revision 2dd0e2cea360bc9206eb88ecc40d259e796c239d
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "oat_compilation_unit.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "arm_lir.h"
#include "codegen_arm.h"
#include "../codegen_util.h"
#include "../ralloc_util.h"

namespace art {

LIR* ArmCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
         int src2, LIR* target)
{
  OpRegReg(cu, kOpCmp, src1, src2);
  return OpCondBranch(cu, cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse.  The condition applies to the first instruction, which
 * is executed if the condition is met.  The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
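 * For example, OpIT(cu, kCondEq, "TE") opens a three-instruction block in
 * which the first two instructions execute when EQ holds and the third when
 * it does not (assembler mnemonic ITTE EQ).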
 */
LIR* ArmCodegen::OpIT(CompilationUnit* cu, ConditionCode ccode, const char* guide)
{
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
       (1 << (3 - strlen(guide)));
  return NewLIR2(cu, kThumb2It, code, mask);
}

/*
 * 64-bit 3-way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void ArmCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
  int t_reg = AllocTemp(cu);
  LoadConstant(cu, t_reg, -1);
  OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  LIR* branch1 = OpCondBranch(cu, kCondLt, NULL);
  LIR* branch2 = OpCondBranch(cu, kCondGt, NULL);
  OpRegRegReg(cu, kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
  LIR* branch3 = OpCondBranch(cu, kCondEq, NULL);

  OpIT(cu, kCondHi, "E");
  NewLIR2(cu, kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
  LoadConstant(cu, t_reg, 1);
  GenBarrier(cu);

  target2 = NewLIR0(cu, kPseudoTargetLabel);
  OpRegReg(cu, kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(cu, kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn(); // Just using as template, will change
  rl_temp.low_reg = t_reg;
  StoreValue(cu, rl_dest, rl_temp);
  FreeTemp(cu, t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}

void ArmCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
{
  LIR* label_list = cu->block_label_list;
  LIR* taken = &label_list[bb->taken->id];
  LIR* not_taken = &label_list[bb->fall_through->id];
  RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
  RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
  OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
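  // The high words were compared above.  Each case branches on that signed
  // result and, where needed, rewrites ccode to the unsigned condition
  // (CC/LS/HI/CS) used by the low-word compare that follows the switch.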
  switch (ccode) {
    case kCondEq:
      OpCondBranch(cu, kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(cu, kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(cu, kCondLt, taken);
      OpCondBranch(cu, kCondGt, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCondBranch(cu, kCondLt, taken);
      OpCondBranch(cu, kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(cu, kCondGt, taken);
      OpCondBranch(cu, kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(cu, kCondGt, taken);
      OpCondBranch(cu, kCondLt, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpCondBranch(cu, ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* ArmCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
                                LIR* target)
{
  LIR* branch;
  int mod_imm;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if ((ARM_LOWREG(reg)) && (check_value == 0) &&
     ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2(cu, (arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg, 0);
  } else {
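    // A negative result from ModifiedImmediate() means check_value has no
    // Thumb2 modified-immediate encoding and must be materialized in a temp.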
    mod_imm = ModifiedImmediate(check_value);
    if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
      NewLIR2(cu, kThumbCmpRI8, reg, check_value);
    } else if (mod_imm >= 0) {
      NewLIR2(cu, kThumb2CmpRI8, reg, mod_imm);
    } else {
      int t_reg = AllocTemp(cu);
      LoadConstant(cu, t_reg, check_value);
      OpRegReg(cu, kOpCmp, reg, t_reg);
    }
    branch = NewLIR2(cu, kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}

LIR* ArmCodegen::OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
{
  LIR* res;
  int opcode;
  if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
    return OpFpRegCopy(cu, r_dest, r_src);
  if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
    opcode = kThumbMovRR;
  else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
     opcode = kThumbMovRR_H2H;
  else if (ARM_LOWREG(r_dest))
     opcode = kThumbMovRR_H2L;
  else
     opcode = kThumbMovRR_L2H;
  res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
  if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* ArmCodegen::OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
{
  LIR* res = OpRegCopyNoInsert(cu, r_dest, r_src);
  AppendLIR(cu, res);
  return res;
}

void ArmCodegen::OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
                               int src_hi)
{
  bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
  bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
  DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
  DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
  if (dest_fp) {
    if (src_fp) {
      OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
    } else {
      NewLIR3(cu, kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
    }
  } else {
    if (src_fp) {
      NewLIR3(cu, kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
    } else {
      // Handle overlap
      if (src_hi == dest_lo) {
        OpRegCopy(cu, dest_hi, src_hi);
        OpRegCopy(cu, dest_lo, src_lo);
      } else {
        OpRegCopy(cu, dest_lo, src_lo);
        OpRegCopy(cu, dest_hi, src_hi);
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
                                    RegLocation rl_src, RegLocation rl_dest, int lit)
{
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (dalvik_opcode != Instruction::DIV_INT_LIT8) {
    return false;
  }

  int r_magic = AllocTemp(cu);
  LoadConstant(cu, r_magic, magic_table[lit].magic);
  rl_src = LoadValue(cu, rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
  int r_hi = AllocTemp(cu);
  int r_lo = AllocTemp(cu);
  NewLIR4(cu, kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
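  // r_hi now holds the upper 32 bits of src * magic.  Each pattern below
  // applies the remaining correction from the Hacker's Delight recipe: an
  // arithmetic shift by magic_table[lit].shift and a sign adjustment
  // (subtracting src >> 31); Divide7 first adds src back because its magic
  // constant is negative.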
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(cu, kOpSub, rl_result.low_reg, r_hi,
               rl_src.low_reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
               EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(cu, kOpAdd, r_hi, rl_src.low_reg);
      OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
               EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(cu, rl_dest, rl_result);
  return true;
}

LIR* ArmCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
                    int reg1, int base, int offset, ThrowKind kind)
{
  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
  return NULL;
}

RegLocation ArmCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
                                     bool is_div)
{
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
  return rl_dest;
}

RegLocation ArmCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
                                  bool is_div)
{
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
  return rl_dest;
}

bool ArmCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
{
  DCHECK_EQ(cu->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
  rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(cu, info);
  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
  OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
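  // IT block: the first mov (take src2) executes when the comparison shows
  // src1 is not the answer (GT for min, LT for max); the "E" mov takes src1
  // otherwise.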
  OpIT(cu, (is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
  OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
  GenBarrier(cu);
  StoreValue(cu, rl_dest, rl_result);
  return true;
}

void ArmCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
{
  LOG(FATAL) << "Unexpected use of OpLea for Arm";
}

void ArmCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
{
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}

bool ArmCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
  DCHECK_EQ(cu->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int or Object
  RegLocation rl_src_new_value = info->args[5];  // int or Object
  RegLocation rl_dest = InlineTarget(cu, info);  // boolean place for result

  // Release store semantics, get the barrier out of the way.  TODO: revisit
  GenMemBarrier(cu, kStoreLoad);

  RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
  RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);

  if (need_write_barrier) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
  }

  RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);

  int r_ptr = AllocTemp(cu);
  OpRegRegReg(cu, kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(cu, rl_object.s_reg_low);
  FreeTemp(cu, rl_object.low_reg);
  ClobberSReg(cu, rl_offset.s_reg_low);
  FreeTemp(cu, rl_offset.low_reg);

  int r_old_value = AllocTemp(cu);
  NewLIR3(cu, kThumb2Ldrex, r_old_value, r_ptr, 0);  // r_old_value := [r_ptr]

  RegLocation rl_expected = LoadValue(cu, rl_src_expected, kCoreReg);

  // if (r_old_value == rExpected) {
  //   [r_ptr] <- r_new_value && r_result := success ? 0 : 1
  //   r_result ^= 1
  // } else {
  //   r_result := 0
  // }
  OpRegReg(cu, kOpCmp, r_old_value, rl_expected.low_reg);
  FreeTemp(cu, r_old_value);  // Now unneeded.
  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
  OpIT(cu, kCondEq, "TE");
  NewLIR4(cu, kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
  FreeTemp(cu, r_ptr);  // Now unneeded.
  OpRegImm(cu, kOpXor, rl_result.low_reg, 1);
  OpRegReg(cu, kOpXor, rl_result.low_reg, rl_result.low_reg);

  StoreValue(cu, rl_dest, rl_result);

  return true;
}

LIR* ArmCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
{
  return RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
}

LIR* ArmCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
{
  return NewLIR3(cu, kThumb2Vldms, rBase, fr0, count);
}

LIR* ArmCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
{
  return NewLIR3(cu, kThumb2Vstms, rBase, fr0, count);
}

void ArmCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit)
{
  OpRegRegRegShift(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
  }
}

void ArmCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
{
  int t_reg = AllocTemp(cu);
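  // OR the two halves with flags set: Z is set only when the full 64-bit
  // divisor is zero, which the EQ check below turns into a DivZero throw.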
  NewLIR4(cu, kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
  FreeTemp(cu, t_reg);
  GenCheck(cu, kCondEq, kThrowDivZero);
}

// Test suspend flag, return target of taken suspend branch
LIR* ArmCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
{
  NewLIR2(cu, kThumbSubRI8, rARM_SUSPEND, 1);
  return OpCondBranch(cu, (target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* ArmCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
{
  // Combine sub & test using sub setflags encoding here
  NewLIR3(cu, kThumb2SubsRRI12, reg, reg, 1);
  return OpCondBranch(cu, c_code, target);
}

void ArmCodegen::GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kSY; break;
    case kLoadLoad: dmb_flavor = kSY; break;
    case kStoreStore: dmb_flavor = kST; break;
    case kStoreLoad: dmb_flavor = kSY; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }
  LIR* dmb = NewLIR1(cu, kThumb2Dmb, dmb_flavor);
  dmb->def_mask = ENCODE_ALL;
#endif
}

bool ArmCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
  rl_src = LoadValueWide(cu, rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
  int z_reg = AllocTemp(cu);
  LoadConstantNoClobber(cu, z_reg, 0);
  // Check for destructive overlap
  if (rl_result.low_reg == rl_src.high_reg) {
    int t_reg = AllocTemp(cu);
    OpRegCopy(cu, t_reg, rl_src.high_reg);  // Save the high word before it is clobbered.
    OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, t_reg);
    FreeTemp(cu, t_reg);
  } else {
    OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
  }
  FreeTemp(cu, z_reg);
  StoreValueWide(cu, rl_dest, rl_result);
  return false;
}

bool ArmCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
  return false;
}

bool ArmCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
  return false;
}

bool ArmCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
  return false;
}

bool ArmCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2)
{
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
  return false;
}

bool ArmCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
  return false;
}

/*
 * Generate array load
 */
void ArmCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
                          RegLocation rl_index, RegLocation rl_dest, int scale)
{
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  rl_array = LoadValue(cu, rl_array, kCoreReg);
  rl_index = LoadValue(cu, rl_index, kCoreReg);

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  /* null object? */
  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp(cu);
    /* Get len */
    LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
  }
  if (rl_dest.wide || rl_dest.fp) {
    // No special indexed operation, lea + load w/ displacement
    int reg_ptr = AllocTemp(cu);
    OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                     EncodeShift(kArmLsl, scale));
    FreeTemp(cu, rl_index.low_reg);
    rl_result = EvalLoc(cu, rl_dest, reg_class, true);

    if (needs_range_check) {
      // TODO: change kCondCS to a more meaningful name, is the sense of
      // carry-set/clear flipped?
      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(cu, reg_len);
    }
    if (rl_dest.wide) {
      LoadBaseDispWide(cu, reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
      FreeTemp(cu, reg_ptr);
      StoreValueWide(cu, rl_dest, rl_result);
    } else {
      LoadBaseDisp(cu, reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
      FreeTemp(cu, reg_ptr);
      StoreValue(cu, rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    int reg_ptr = AllocTemp(cu);
    OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    FreeTemp(cu, rl_array.low_reg);
    rl_result = EvalLoc(cu, rl_dest, reg_class, true);

    if (needs_range_check) {
      // TODO: change kCondCS to a more meaningful name, is the sense of
      // carry-set/clear flipped?
      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(cu, reg_len);
    }
    LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
    FreeTemp(cu, reg_ptr);
    StoreValue(cu, rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void ArmCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
                          RegLocation rl_index, RegLocation rl_src, int scale)
{
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;

  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  rl_array = LoadValue(cu, rl_array, kCoreReg);
  rl_index = LoadValue(cu, rl_index, kCoreReg);
  int reg_ptr = INVALID_REG;
  if (IsTemp(cu, rl_array.low_reg)) {
    Clobber(cu, rl_array.low_reg);
    reg_ptr = rl_array.low_reg;
  } else {
    reg_ptr = AllocTemp(cu);
  }

  /* null object? */
  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp(cu);
    // NOTE: max live temps(4) here.
    /* Get len */
    LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(cu, rl_src, reg_class);
    } else {
      rl_src = LoadValue(cu, rl_src, reg_class);
    }
    OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                     EncodeShift(kArmLsl, scale));
    if (needs_range_check) {
      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(cu, reg_len);
    }
    if (rl_src.wide) {
      StoreBaseDispWide(cu, reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
    } else {
      StoreBaseDisp(cu, reg_ptr, data_offset, rl_src.low_reg, size);
    }
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    rl_src = LoadValue(cu, rl_src, reg_class);
    if (needs_range_check) {
      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(cu, reg_len);
    }
    StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
                     scale, size);
  }
  FreeTemp(cu, reg_ptr);
}

/*
 * Generate array store of an object reference.  Unlike GenArrayPut, this
 * performs a runtime type check of the value against the array's class and
 * marks the GC card after the store.
 */
void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale)
{
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();

  FlushAllRegs(cu);  // Use explicit registers
  LockCallTemps(cu);

  int r_value = TargetReg(kArg0);  // Register holding value
  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
  int r_array = TargetReg(kArg2);  // Register holding array
  int r_index = TargetReg(kArg3);  // Register holding index into array

  LoadValueDirectFixed(cu, rl_array, r_array);  // Grab array
  LoadValueDirectFixed(cu, rl_src, r_value);  // Grab value
  LoadValueDirectFixed(cu, rl_index, r_index);  // Grab index

  GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags);  // NPE?

  // Store of null?
  LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);

  // Get the array's class.
  LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
  CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
                          r_array_class, true);
  // Redo LoadValues in case they didn't survive the call.
  LoadValueDirectFixed(cu, rl_array, r_array);  // Reload array
  LoadValueDirectFixed(cu, rl_index, r_index);  // Reload index
  LoadValueDirectFixed(cu, rl_src, r_value);  // Reload value
  r_array_class = INVALID_REG;

  // Branch here if value to be stored == null
  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
  null_value_check->target = target;

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = TargetReg(kArg1);
    LoadWordDisp(cu, r_array, len_offset, reg_len);  // Get len
  }
  /* r_ptr -> array data */
  int r_ptr = AllocTemp(cu);
  OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
  if (needs_range_check) {
    GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
  }
  StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
  FreeTemp(cu, r_ptr);
  FreeTemp(cu, r_index);
  MarkGCCard(cu, r_value, r_array);
}

}  // namespace art