gen_common.cc revision 468532ea115657709bc32ee498e701a4c71762d4
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
#include "verifier/method_verifier.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  barrier->def_mask = -1;
}

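/*
 * A note on the Gen*Check helpers below: rather than emitting throwing code
 * inline, they branch to a deferred "launchpad" LIR recorded in
 * throw_launchpads_, which is materialized at the end of compilation by
 * HandleThrowLaunchPads() later in this file.
 */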
// FIXME: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
  DCHECK_NE(cu_->instruction_set, kMips);
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
  LIR* branch;
  if (c_code == kCondAl) {
    branch = OpUnconditionalBranch(tgt);
  } else {
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

/* Perform null-check on a register.  */
LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
    opt_flags & MIR_IGNORE_NULL_CHECK) {
    return NULL;
  }
  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                             ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }
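  // (Swapping the operands means the condition must be mirrored as well:
  // FlipComparisonOrder turns lt into gt, le into ge, and so on, while
  // eq and ne are unaffected.)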

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
      OpUnconditionalBranch(fall_through);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
  OpUnconditionalBranch(fall_through);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
  OpUnconditionalBranch(fall_through);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    LoadValueDirect(rl_src, rl_result.low_reg);
  }
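  // Sign-extend into the high word: an arithmetic shift by 31 replicates the
  // sign bit, yielding 0 for non-negative values and -1 for negative ones.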
  OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    int r_src = AllocTemp();
    int r_dst = AllocTemp();
    int r_idx = AllocTemp();
    int r_val = INVALID_REG;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
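    // In effect (a sketch of the loop being emitted, not literal code):
    //   for (idx = elems - 1; idx >= 0; idx--) dst[idx] = src[idx];
    // where each element is a 32-bit word (scale 2 in the indexed ops below).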
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() +
                    i * 4, rl_arg.low_reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.low_reg)) {
        FreeTemp(rl_arg.low_reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, true);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
      if (IsTemp(rl_method.low_reg)) {
        FreeTemp(rl_method.low_reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      LoadWordDisp(r_method,
                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase,
                   mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized. Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      LoadConstant(TargetReg(kArg0), ssb_index);
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (is_volatile) {
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
                        rl_src.high_reg);
    } else {
      StoreWordDisp(rBase, field_offset, rl_src.low_reg);
    }
    if (is_volatile) {
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.low_reg, rBase);
    }
    FreeTemp(rBase);
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
  }
}

void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, false);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      LoadWordDisp(r_method,
                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized. Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
    if (is_volatile) {
      GenMemBarrier(kLoadLoad);
    }
    if (is_long_or_double) {
      LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
                       rl_result.high_reg, INVALID_SREG);
    } else {
      LoadWordDisp(rBase, field_offset, rl_result.low_reg);
    }
    FreeTemp(rBase);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
    CallRuntimeHelperImm(getterOffset, field_idx, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    int r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleIntrinsicLaunchPads() {
  int num_elems = intrinsic_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = intrinsic_launchpads_.Get(i);
    CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
    current_dalvik_offset_ = info->offset;
    AppendLIR(lab);
    // NOTE: GenInvoke handles MarkSafepointPC
    GenInvoke(info);
    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
    if (resume_lab != NULL) {
      OpUnconditionalBranch(resume_lab);
    }
  }
}

void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    ThreadOffset func_offset(-1);
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
    bool target_x86 = (cu_->instruction_set == kX86);
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
        break;
      case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
        // On Arm/Mips v1 already holds the array length; x86 keeps the array
        // pointer in v1 and reloads the length from it.  v2 is the constant index.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), v1);
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
        if (v2 != TargetReg(kArg0)) {
          OpRegCopy(TargetReg(kArg0), v1);
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler expects
            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), v2);
          }
        } else {
          if (v1 == TargetReg(kArg1)) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), v1);
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), v1);
          }
        }
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowDivZero:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), v1);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
        break;
      case kThrowStackOverflow:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
        // Restore stack alignment
        if (target_x86) {
          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
        } else {
          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
        }
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCalleeSave();
    int r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
  }
}

void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  bool is_volatile;

  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);

  if (fast_path && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (cu_->instruction_set == kX86) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
                         rl_result.high_reg, rl_obj.s_reg_low);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
      } else {
        int reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
                   kWord, rl_obj.s_reg_low);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  bool is_volatile;

  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, true);
  if (fast_path && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      int reg_ptr;
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
      }
    }
  } else {
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  int res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.low_reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load the type from the dex cache.
    int32_t dex_cache_offset =
        mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
        type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
      // Resolved, store and hop over following code
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      LIR* branch2 = OpUnconditionalBranch(0);
      // TUNING: move slow path to end & remove unconditional branch
      LIR* target1 = NewLIR0(kPseudoTargetLabel);
      // Call out to helper, which will return resolved type in kArg0
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                              rl_method.low_reg, true);
      RegLocation rl_result = GetReturn(false);
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      // Rejoin code paths
      LIR* target2 = NewLIR0(kPseudoTargetLabel);
      branch1->target = target1;
      branch2->target = target2;
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                             (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers
    LoadCurrMethodDirect(TargetReg(kArg2));
    LoadWordDisp(TargetReg(kArg2),
                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
    // Might call out to helper, which will return resolved string in kRet0
    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LoadConstant(TargetReg(kArg1), string_idx);
    if (cu_->instruction_set == kThumb2) {
      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      GenBarrier();
      // For testing, always force through helper
      if (!EXERCISE_SLOWEST_STRING_PATH) {
        OpIT(kCondEq, "T");
      }
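      // With the IT block in place, "T" adds a second 'then' slot, so the copy
      // and the blx below execute only when kRet0 was null, i.e. the string is
      // still unresolved; otherwise both are skipped.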
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);    // .eq, helper(Method*, string_idx)
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
    } else if (cu_->instruction_set == kMips) {
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), TargetReg(kArg2),
                              TargetReg(kArg1), true);
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.low_reg,
                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  ThreadOffset func_offset(-1);
  if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *cu_->dex_file, type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
  }
  CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.low_reg;
  if (result_reg == object.low_reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);     // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);     // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.low_reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.low_reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");   // if-convert the test
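      // "E" encodes an if-then-else block: the next instruction executes on
      // eq and the one after it on ne, matching the .eq/.ne notes below.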
      LoadConstant(rl_result.low_reg, 1);     // .eq case - load true
      LoadConstant(rl_result.low_reg, 0);     // .ne case - load false
    } else {
      LoadConstant(rl_result.low_reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.low_reg, 1);     // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.low_reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      if (cu_->instruction_set != kX86) {
        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
        OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
        FreeTemp(r_tgt);
      } else {
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      }
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCalleeSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kArg0
      // InitializeTypeFromCode(idx, method)
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                              TargetReg(kArg1), true);
      OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  /* Null is OK - continue */
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg1 now contains object->klass_ */
  LIR* branch2 = NULL;
  if (!type_known_abstract) {
    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
  }
  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), TargetReg(kArg1),
                          TargetReg(kArg2), true);
  /* branch target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch1->target = target;
  if (branch2 != NULL) {
    branch2->target = target;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE:  This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
    OpRegCopy(rl_result.low_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                rl_src2.high_reg);
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}

void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  ThreadOffset func_offset(-1);

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();   /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
    } else {
      if (shift_op) {
        int t_reg = INVALID_REG;
        if (cu_->instruction_set == kX86) {
          // X86 doesn't require masking and must use ECX
          t_reg = TargetReg(kCount);  // rCX
          LoadValueDirectFixed(rl_src2, t_reg);
        } else {
          rl_src2 = LoadValue(rl_src2, kCoreReg);
          t_reg = AllocTemp();
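          // Dalvik (like Java) uses only the low five bits of an int shift
          // count, so mask the count explicitly on targets that don't.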
1287          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
1288        }
1289        rl_src1 = LoadValue(rl_src1, kCoreReg);
1290        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1291        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
1292        FreeTemp(t_reg);
1293      } else {
1294        rl_src1 = LoadValue(rl_src1, kCoreReg);
1295        rl_src2 = LoadValue(rl_src2, kCoreReg);
1296        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1297        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
1298      }
1299    }
1300    StoreValue(rl_dest, rl_result);
  } else {
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
    } else {
      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv) {
        rl_result = GetReturn(false);
      } else {
        rl_result = GetReturnAlt();
      }
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode and then either dispatch to special-purpose codegen
 * routines or produce the corresponding target instructions directly.
 */

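// Returns true if 'x' is a power of two.  Note that this also returns true
// for x == 0 ((0 & -1) == 0); callers below only pass in values >= 2.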
static bool IsPowerOfTwo(int x) {
  return (x & (x - 1)) == 0;
}

// Returns true if no more than two bits are set in 'x'.
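// 'x &= x - 1' clears the lowest set bit, so at most one bit may remain
// afterwards, e.g. 10 (0b1010) -> 8 (0b1000) -> true.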
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'; 'x' must be non-zero or
// the loops below will not terminate.
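// For example, LowestSetBit(0x10): the low nibble is zero, so bit_posn
// becomes 4 and x becomes 1; the low bit is then set, so the result is 4.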
static int LowestSetBit(unsigned int x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

// Returns true if it generated instructions to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
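// For division by 2^k, negative dividends are biased by (2^k - 1) before the
// arithmetic shift so the quotient rounds toward zero.  Worked example for
// lit == 4 (k == 2) and a dividend of -7: asr #31 gives -1, lsr #30 gives 3,
// adding the dividend gives -4, and asr #2 gives -1 == trunc(-7 / 4).  The
// remainder path computes ((src + bias) & (lit - 1)) - bias, which for the
// same inputs gives (-4 & 3) - 3 == -3 == -7 % 4.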
bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivide(dalvik_opcode, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
      dalvik_opcode == Instruction::DIV_INT_LIT16);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (div) {
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    }
  } else {
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it generated instructions to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
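// For example, lit == 10 (0b1010) has exactly two set bits, so the multiply
// can become (src << 1) + (src << 3) via the target-specific two-bit
// multiplier; lit == 7 is one less than a power of two, so it becomes
// (src << 3) - src.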
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (lit < 2) {
    // Avoid special cases.
    return false;
  } else if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
    // Shift and add and shift.
    int first_bit = LowestSetBit(lit);
    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << LowestSetBit(lit + 1)) - src.
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    int t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
      } else {
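        // No reverse-subtract on this target: compute (lit - src) as (-src) + lit.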
        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
        OpRegImm(kOpAdd, rl_result.low_reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
        return;
      }
      if (HandleEasyDivide(opcode, rl_src, rl_dest, lit)) {
        return;
      }
      is_div = (opcode == Instruction::DIV_INT) ||
               (opcode == Instruction::DIV_INT_2ADDR) ||
               (opcode == Instruction::DIV_INT_LIT8) ||
               (opcode == Instruction::DIV_INT_LIT16);
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
      } else {
        FlushAllRegs();   /* Everything to home location */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
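        // pIdivmod leaves the quotient in the primary return register and
        // the remainder in the alternate one.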
        if (is_div) {
          rl_result = GetReturn(false);
        } else {
          rl_result = GetReturnAlt();
        }
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  ThreadOffset func_offset(-1);
  int ret_reg = TargetReg(kRet0);

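  // For operations handled inline, first_op is applied to the low words and
  // second_op to the high words (e.g. kOpAdd/kOpAdc for ADD_LONG) by
  // GenLong3Addr below; everything else goes out to a runtime helper.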
  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
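      // (if the result's low reg aliases the source's high reg, the first
      // kOpMvn below would clobber the source high word).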
      if (rl_result.low_reg == rl_src2.high_reg) {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.high_reg);
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set == kThumb2) {
        GenMulLong(rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        GenAndLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op: " << opcode;
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0)) {
      rl_result = GetReturnWide(false);
    } else {
      rl_result = GetReturnWideAlt();
    }
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions
   */
  FlushAllRegs();   /* Send everything to home location */
  if (rl_src.wide) {
    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
  } else {
    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
  }
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Generate a test for a pending suspend request, unless the check can be elided */
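// The suspend launchpad is materialized by a later pass; it calls the suspend
// entrypoint and then branches back to ret_lab, which is stashed in the
// launchpad's operands below.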
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  FlushAllRegs();
  LIR* branch = OpTestSuspend(NULL);
  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
                       reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
  branch->target = target;
  suspend_launchpads_.Insert(target);
}

/* As GenSuspendTest, but branch to 'target' afterwards rather than falling through */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
  }
  OpTestSuspend(target);
  LIR* launch_pad =
      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
             reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
  FlushAllRegs();
  OpUnconditionalBranch(launch_pad);
  suspend_launchpads_.Insert(launch_pad);
}

}  // namespace art