/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "x86_lir.h"

namespace art {

/*
 * Compare a register against a memory operand and, on the given condition,
 * branch to a throw launchpad of the given kind.
 */
LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code,
                                int reg1, int base, int offset, ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
                    current_dalvik_offset_, reg1, base, offset);
  OpRegMem(kOpCmp, reg1, base, offset);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

/*
 * Compare two 64-bit values
 *    x = y     return  0
 *    x < y     return -1
 *    x > y     return  1
 */
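/*
 * Branch-free strategy: compute x - y into (r1:r0) with sub/sbc, then
 * materialize sign = -(x < y) with setl and nonzero = (x != y) with setnz
 * on the OR of the two halves; sign | nonzero yields -1, 0 or 1.
 * Illustrative sequence, assuming this backend's usual mapping of
 * r0..r3 onto EAX, ECX, EDX, EBX:
 *   sub   eax, edx      ; low halves
 *   sbb   ecx, ebx      ; high halves, with borrow
 *   setl  dl            ; dl = (x < y)
 *   movzx edx, dl
 *   neg   edx           ; edx = x < y ? -1 : 0
 *   or    eax, ecx      ; ZF = (x == y)
 *   setnz al            ; al = (x != y)
 *   movzx eax, al
 *   or    eax, edx      ; -1, 0 or 1
 */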
void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) - (r3:r2)
  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
  NewLIR2(kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
  NewLIR2(kX86Movzx8RR, r2, r2);
  OpReg(kOpNeg, r2);         // r2 = -r2
  OpRegReg(kOpOr, r0, r1);   // r0 = high | low - sets ZF
  NewLIR2(kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
  NewLIR2(kX86Movzx8RR, r0, r0);
  OpRegReg(kOpOr, r0, r2);   // r0 = r0 | r2
  RegLocation rl_result = LocCReturn();
  StoreValue(rl_dest, rl_result);
}

X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
  switch (cond) {
    case kCondEq: return kX86CondEq;
    case kCondNe: return kX86CondNe;
    case kCondCs: return kX86CondC;
    case kCondCc: return kX86CondNc;
    case kCondMi: return kX86CondS;
    case kCondPl: return kX86CondNs;
    case kCondVs: return kX86CondO;
    case kCondVc: return kX86CondNo;
    case kCondHi: return kX86CondA;
    case kCondLs: return kX86CondBe;
    case kCondGe: return kX86CondGe;
    case kCondLt: return kX86CondL;
    case kCondGt: return kX86CondG;
    case kCondLe: return kX86CondLe;
    case kCondAl:
    case kCondNv: LOG(FATAL) << "Should not reach here";
  }
  return kX86CondO;
}

LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
                             LIR* target) {
  NewLIR2(kX86Cmp32RR, src1, src2);
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
                                int check_value, LIR* target) {
  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
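    // test reg,reg sets ZF and SF exactly as cmp reg,0 would, with a
    // shorter instruction encoding.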
    NewLIR2(kX86Test32RR, reg, reg);
  } else {
    NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
  }
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
  if (X86_FPREG(r_dest) || X86_FPREG(r_src)) {
    return OpFpRegCopy(r_dest, r_src);
  }
  LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
                    r_dest, r_src);
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src) {
  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
  AppendLIR(res);
  return res;
}

void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi,
                               int src_lo, int src_hi) {
  bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
  bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
  DCHECK_EQ(X86_FPREG(src_lo), X86_FPREG(src_hi));
  DCHECK_EQ(X86_FPREG(dest_lo), X86_FPREG(dest_hi));
  if (dest_fp) {
    if (src_fp) {
      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
    } else {
      // TODO: Prevent this from happening in the code. The result is often
      // unused or could have been loaded more easily from memory.
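      // Pack the two 32-bit core halves into one XMM register: movd
      // copies each half into the low dword of an XMM register, psllq
      // shifts the high half up by 32 bits, and orps merges the two.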
      NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
      NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
      NewLIR2(kX86PsllqRI, dest_hi, 32);
      NewLIR2(kX86OrpsRR, dest_lo, dest_hi);
    }
  } else {
    if (src_fp) {
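      // Unpack an XMM register into two core registers: movd extracts
      // the low dword, then psrlq shifts the high dword down so a second
      // movd can extract it. Note this clobbers src_lo.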
      NewLIR2(kX86MovdrxRR, dest_lo, src_lo);
      NewLIR2(kX86PsrlqRI, src_lo, 32);
      NewLIR2(kX86MovdrxRR, dest_hi, src_lo);
    } else {
      // Handle overlap
      if (src_hi == dest_lo) {
        OpRegCopy(dest_hi, src_hi);
        OpRegCopy(dest_lo, src_lo);
      } else {
        OpRegCopy(dest_lo, src_lo);
        OpRegCopy(dest_hi, src_hi);
      }
    }
  }
}

void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect";
}

void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  LIR* taken = &block_label_list_[bb->taken->id];
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
  // Swap operands and condition code to prevent use of zero flag.
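  // After a sub/sbc pair, SF and OF reflect the full 64-bit signed
  // comparison but ZF only covers the high word, so kCondLe/kCondGt
  // (which need ZF) are rewritten as kCondGe/kCondLt on the swapped
  // operands: x <= y iff y >= x, and x > y iff y < x.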
  if (ccode == kCondLe || ccode == kCondGt) {
    // Compute (r3:r2) = (r3:r2) - (r1:r0)
    OpRegReg(kOpSub, r2, r0);  // r2 = r2 - r0
    OpRegReg(kOpSbc, r3, r1);  // r3 = r3 - r1 - CF
  } else {
    // Compute (r1:r0) = (r1:r0) - (r3:r2)
    OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
    OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
  }
  switch (ccode) {
    case kCondEq:
    case kCondNe:
      OpRegReg(kOpOr, r0, r1);  // r0 = r0 | r1
      break;
    case kCondLe:
      ccode = kCondGe;
      break;
    case kCondGt:
      ccode = kCondLt;
      break;
    case kCondLt:
    case kCondGe:
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCondBranch(ccode, taken);
}

RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo,
                                     int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
  return rl_dest;
}

RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo,
                                  int reg_hi, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRem for x86";
  return rl_dest;
}

bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kX86);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
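  // Emit a compare-and-branch diamond:
  //   cmp  src1, src2
  //   jcc  other          ; jg for min, jl for max
  //   mov  result, src1
  //   jmp  done
  // other:
  //   mov  result, src2
  // done: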
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  LIR* branch = NewLIR2(kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
  branch->target = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
  branch2->target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  return true;
}

void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
  NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
}

void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
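  // 16-bit compare of an immediate against a field of the current Thread,
  // addressed through the thread-local segment register (fs on 32-bit x86).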
  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
}

bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
  DCHECK_NE(cu_->instruction_set, kThumb2);
  return false;
}

LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) {
  LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
  return NULL;
}

LIR* X86Mir2Lir::OpVldm(int rBase, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for x86";
  return NULL;
}

LIR* X86Mir2Lir::OpVstm(int rBase, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for x86";
  return NULL;
}

void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
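  // lit has exactly two bits set: lit == (1 << second_bit) | (1 << first_bit),
  // so src * lit == ((src << (second_bit - first_bit)) + src) << first_bit.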
  int t_reg = AllocTemp();
  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
  FreeTemp(t_reg);
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
  }
}

void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
  int t_reg = AllocTemp();
  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
  FreeTemp(t_reg);
}

// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
  OpTlsCmp(Thread::ThreadFlagsOffset(), 0);
  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
}

// Decrement register and branch on condition
LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
  OpRegImm(kOpSub, reg, 1);
  return OpCmpImmBranch(c_code, reg, 0, target);
}

bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of SmallLiteralDivRem in x86";
  return false;
}

LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT in x86";
  return NULL;
}

void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenMulLong for x86";
}

void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) + (r3:r2)
  OpRegReg(kOpAdd, r0, r2);  // r0 = r0 + r2
  OpRegReg(kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                          INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) - (r3:r2)
  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                          INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) & (r3:r2)
  OpRegReg(kOpAnd, r0, r2);  // r0 = r0 & r2
  OpRegReg(kOpAnd, r1, r3);  // r1 = r1 & r3
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                          INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenOrLong(RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) | (r3:r2)
  OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
  OpRegReg(kOpOr, r1, r3);  // r1 = r1 | r3
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                          INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenXorLong(RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
  OpRegReg(kOpXor, r0, r2);  // r0 = r0 ^ r2
  OpRegReg(kOpXor, r1, r3);  // r1 = r1 ^ r3
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                          INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src, r0, r1);
  // Compute (r1:r0) = -(r1:r0)
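  // 64-bit two's-complement negation: neg r0 sets CF iff r0 != 0, so the
  // adc folds the borrow into r1 before r1 is negated in turn:
  // -(r1:r0) == (-(r1 + (r0 != 0)) : -r0).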
  OpRegReg(kOpNeg, r0, r0);  // r0 = -r0
  OpRegImm(kOpAdc, r1, 0);   // r1 = r1 + CF
  OpRegReg(kOpNeg, r1, r1);  // r1 = -r1
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                          INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpCmp: opcode = kX86Cmp32RT;  break;
    case kOpMov: opcode = kX86Mov32RT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  NewLIR2(opcode, r_dest, thread_offset.Int32Value());
}

/*
 * Generate array load
 */
void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  rl_array = LoadValue(rl_array, kCoreReg);
  rl_index = LoadValue(rl_index, kCoreReg);

  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
                   len_offset, kThrowArrayBounds);
  }
  if ((size == kLong) || (size == kDouble)) {
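    // Form the element address with LEA first so the array and index
    // registers can be freed before the 64-bit result pair is allocated,
    // easing pressure on the small temp pool.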
    int reg_addr = AllocTemp();
    OpLea(reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
    FreeTemp(rl_array.low_reg);
    FreeTemp(rl_index.low_reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);
    LoadBaseIndexedDisp(reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
                        rl_result.high_reg, size, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    rl_result = EvalLoc(rl_dest, reg_class, true);

    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
                        data_offset, rl_result.low_reg, INVALID_REG, size,
                        INVALID_SREG);

    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;

  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  rl_index = LoadValue(rl_index, kCoreReg);

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
  }
  if ((size == kLong) || (size == kDouble)) {
    rl_src = LoadValueWide(rl_src, reg_class);
  } else {
    rl_src = LoadValue(rl_src, reg_class);
  }
  // If the src reg can't be byte accessed, move it to a temp first.
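  // (In 32-bit mode only registers with encodings 0-3, i.e. EAX, ECX,
  // EDX and EBX, have addressable low-byte forms.)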
  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
    int temp = AllocTemp();
    OpRegCopy(temp, rl_src.low_reg);
    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
                         INVALID_REG, size, INVALID_SREG);
  } else {
    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
                         rl_src.high_reg, size, INVALID_SREG);
  }
}

/*
 * Generate array store for an object array: unlike the plain store above,
 * this includes the element type check and the GC card mark.
 */
void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
                                RegLocation rl_index, RegLocation rl_src, int scale) {
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();

  FlushAllRegs();  // Use explicit registers
  LockCallTemps();

  int r_value = TargetReg(kArg0);  // Register holding value
  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
  int r_array = TargetReg(kArg2);  // Register holding array
  int r_index = TargetReg(kArg3);  // Register holding index into array

  LoadValueDirectFixed(rl_array, r_array);  // Grab array
  LoadValueDirectFixed(rl_src, r_value);  // Grab value
  LoadValueDirectFixed(rl_index, r_index);  // Grab index

  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?

  // Store of null?
  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);

  // Get the array's class.
  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                          r_array_class, true);
  // Redo LoadValues in case they didn't survive the call.
  LoadValueDirectFixed(rl_array, r_array);  // Reload array
  LoadValueDirectFixed(rl_index, r_index);  // Reload index
  LoadValueDirectFixed(rl_src, r_value);  // Reload value
  r_array_class = INVALID_REG;

  // Branch here if value to be stored == null
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_value_check->target = target;

  // make an extra temp available for card mark below
  FreeTemp(TargetReg(kArg1));
  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
    GenRegMemCheck(kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
  }
  StoreBaseIndexedDisp(r_array, r_index, scale,
                       data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
  FreeTemp(r_index);
  if (!mir_graph_->IsConstantNullRef(rl_src)) {
    MarkGCCard(r_value, r_array);
  }
}

void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift) {
  // Default implementation is just to ignore the constant case.
  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
}

void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  // Default - bail to non-const handler.
  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
}

}  // namespace art