gen_common.cc revision 43a065ce1dda78e963868f9753a6e263721af927
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#include "dex/compiler_ir.h"
17#include "dex/compiler_internals.h"
18#include "dex/quick/arm/arm_lir.h"
19#include "dex/quick/mir_to_lir-inl.h"
20#include "entrypoints/quick/quick_entrypoints.h"
21#include "mirror/array.h"
22#include "mirror/object-inl.h"
23#include "verifier/method_verifier.h"
24#include <functional>
25
26namespace art {
27
28/*
29 * This source file contains "gen" codegen routines that should
30 * be applicable to most targets.  Only mid-level support utilities
31 * and "op" calls may be used here.
32 */
33
34/*
35 * Generate a kPseudoBarrier marker to indicate the boundary of special
36 * blocks.
37 */
38void Mir2Lir::GenBarrier() {
39  LIR* barrier = NewLIR0(kPseudoBarrier);
40  /* Mark all resources as being clobbered */
41  DCHECK(!barrier->flags.use_def_invalid);
42  barrier->u.m.def_mask = ENCODE_ALL;
43}
44
45// TODO: need to do some work to split out targets with
46// condition codes and those without
47LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
48  DCHECK_NE(cu_->instruction_set, kMips);
49  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
50  LIR* branch = OpCondBranch(c_code, tgt);
51  // Remember branch target - will process later
52  throw_launchpads_.Insert(tgt);
53  return branch;
54}
55
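// Compare a register against an immediate and branch to a throw launch pad if the condition
// holds; kCondAl generates an unconditional throw.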
56LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
57  LIR* tgt;
58  LIR* branch;
59  if (c_code == kCondAl) {
60    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
61                 imm_val);
62    branch = OpUnconditionalBranch(tgt);
63  } else {
64    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
65    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
66  }
67  // Remember branch target - will process later
68  throw_launchpads_.Insert(tgt);
69  return branch;
70}
71
72
73/* Perform null-check on a register.  */
74LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
75  if (Runtime::Current()->ExplicitNullChecks()) {
76    return GenExplicitNullCheck(m_reg, opt_flags);
77  }
78  return nullptr;
79}
80
81/* Perform an explicit null-check on a register.  */
82LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
83  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
84    return NULL;
85  }
86  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
87}
88
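// When implicit null checks are in use (explicit checks disabled), record a safepoint at the
// last emitted instruction, which may fault on a null object.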
89void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
90  if (!Runtime::Current()->ExplicitNullChecks()) {
91    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
92      return;
93    }
94    MarkSafepointPC(last_lir_insn_);
95  }
96}
97
98void Mir2Lir::MarkPossibleStackOverflowException() {
99  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
100    MarkSafepointPC(last_lir_insn_);
101  }
102}
103
104void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
105  if (!Runtime::Current()->ExplicitNullChecks()) {
106    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
107      return;
108    }
109    // Force an implicit null check by performing a memory operation (load) from the given
110    // register with offset 0.  This will cause a signal if the register contains 0 (null).
111    RegStorage tmp = AllocTemp();
112    // TODO: for Mips, would be best to use rZERO as the bogus register target.
113    LIR* load = LoadWordDisp(reg, 0, tmp);
114    FreeTemp(tmp);
115    MarkSafepointPC(load);
116  }
117}
118
119/* Perform check on two registers */
120LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
121                             ThrowKind kind) {
122  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
123                    reg2.GetReg());
124  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
125  // Remember branch target - will process later
126  throw_launchpads_.Insert(tgt);
127  return branch;
128}
129
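// Generate the conditional branch for an IF_xx bytecode comparing two registers, using a
// compare-with-immediate when one operand is a cheaply materialized constant.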
130void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
131                                  RegLocation rl_src2, LIR* taken,
132                                  LIR* fall_through) {
133  ConditionCode cond;
134  switch (opcode) {
135    case Instruction::IF_EQ:
136      cond = kCondEq;
137      break;
138    case Instruction::IF_NE:
139      cond = kCondNe;
140      break;
141    case Instruction::IF_LT:
142      cond = kCondLt;
143      break;
144    case Instruction::IF_GE:
145      cond = kCondGe;
146      break;
147    case Instruction::IF_GT:
148      cond = kCondGt;
149      break;
150    case Instruction::IF_LE:
151      cond = kCondLe;
152      break;
153    default:
154      cond = static_cast<ConditionCode>(0);
155      LOG(FATAL) << "Unexpected opcode " << opcode;
156  }
157
158  // Normalize such that if either operand is constant, src2 will be constant
159  if (rl_src1.is_const) {
160    RegLocation rl_temp = rl_src1;
161    rl_src1 = rl_src2;
162    rl_src2 = rl_temp;
163    cond = FlipComparisonOrder(cond);
164  }
165
166  rl_src1 = LoadValue(rl_src1, kCoreReg);
167  // Is this really an immediate comparison?
168  if (rl_src2.is_const) {
169    // If it's already live in a register or not easily materialized, just keep going
170    RegLocation rl_temp = UpdateLoc(rl_src2);
171    if ((rl_temp.location == kLocDalvikFrame) &&
172        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
173      // OK - convert this to a compare immediate and branch
174      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
175      return;
176    }
177  }
178  rl_src2 = LoadValue(rl_src2, kCoreReg);
179  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
180}
181
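// Generate the conditional branch for an IF_xxZ bytecode comparing a register against zero.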
182void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
183                                      LIR* fall_through) {
184  ConditionCode cond;
185  rl_src = LoadValue(rl_src, kCoreReg);
186  switch (opcode) {
187    case Instruction::IF_EQZ:
188      cond = kCondEq;
189      break;
190    case Instruction::IF_NEZ:
191      cond = kCondNe;
192      break;
193    case Instruction::IF_LTZ:
194      cond = kCondLt;
195      break;
196    case Instruction::IF_GEZ:
197      cond = kCondGe;
198      break;
199    case Instruction::IF_GTZ:
200      cond = kCondGt;
201      break;
202    case Instruction::IF_LEZ:
203      cond = kCondLe;
204      break;
205    default:
206      cond = static_cast<ConditionCode>(0);
207      LOG(FATAL) << "Unexpected opcode " << opcode;
208  }
209  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
210}
211
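// int-to-long: copy the low word and fill the high word with the sign via an arithmetic shift.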
212void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
213  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
214  if (rl_src.location == kLocPhysReg) {
215    OpRegCopy(rl_result.reg, rl_src.reg);
216  } else {
217    LoadValueDirect(rl_src, rl_result.reg.GetLow());
218  }
219  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
220  StoreValueWide(rl_dest, rl_result);
221}
222
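// int-to-byte/short/char: narrow a 32-bit value into the result register.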
223void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
224                              RegLocation rl_src) {
225  rl_src = LoadValue(rl_src, kCoreReg);
226  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
227  OpKind op = kOpInvalid;
228  switch (opcode) {
229    case Instruction::INT_TO_BYTE:
230      op = kOp2Byte;
231      break;
232    case Instruction::INT_TO_SHORT:
233      op = kOp2Short;
234      break;
235    case Instruction::INT_TO_CHAR:
236      op = kOp2Char;
237      break;
238    default:
239      LOG(ERROR) << "Bad int conversion type";
240  }
241  OpRegReg(op, rl_result.reg, rl_src.reg);
242  StoreValue(rl_dest, rl_result);
243}
244
245/*
246 * Let helper function take care of everything.  Will call
247 * Array::AllocFromCode(type_idx, method, count);
248 * Note: AllocFromCode will handle checks for errNegativeArraySize.
249 */
250void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
251                          RegLocation rl_src) {
252  FlushAllRegs();  /* Everything to home location */
253  ThreadOffset<4> func_offset(-1);
254  const DexFile* dex_file = cu_->dex_file;
255  CompilerDriver* driver = cu_->compiler_driver;
256  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
257                                                       type_idx)) {
258    bool is_type_initialized;  // Ignored as an array does not have an initializer.
259    bool use_direct_type_ptr;
260    uintptr_t direct_type_ptr;
261    if (kEmbedClassInCode &&
262        driver->CanEmbedTypeInCode(*dex_file, type_idx,
263                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
264      // The fast path.
265      if (!use_direct_type_ptr) {
266        LoadClassType(type_idx, kArg0);
267        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
268        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
269      } else {
270        // Use the direct pointer.
271        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
272        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
273      }
274    } else {
275      // The slow path.
276      DCHECK_EQ(func_offset.Int32Value(), -1);
277      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArray);
278      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
279    }
280    DCHECK_NE(func_offset.Int32Value(), -1);
281  } else {
282    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayWithAccessCheck);
283    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
284  }
285  RegLocation rl_result = GetReturn(false);
286  StoreValue(rl_dest, rl_result);
287}
288
289/*
290 * Similar to GenNewArray, but with post-allocation initialization.
291 * Verifier guarantees we're dealing with an array class.  Current
292 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
293 * Current code also throws internal unimp if not 'L', '[' or 'I'.
294 */
295void Mir2Lir::GenFilledNewArray(CallInfo* info) {
296  int elems = info->num_arg_words;
297  int type_idx = info->index;
298  FlushAllRegs();  /* Everything to home location */
299  ThreadOffset<4> func_offset(-1);
300  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
301                                                       type_idx)) {
302    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArray);
303  } else {
304    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArrayWithAccessCheck);
305  }
306  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
307  FreeTemp(TargetReg(kArg2));
308  FreeTemp(TargetReg(kArg1));
309  /*
310   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
311   * return region.  Because AllocFromCode placed the new array
312   * in kRet0, we'll just lock it into place.  When debugger support is
313   * added, it may be necessary to additionally copy all return
314   * values to a home location in thread-local storage
315   */
316  LockTemp(TargetReg(kRet0));
317
318  // TODO: use the correct component size, currently all supported types
319  // share array alignment with ints (see comment at head of function)
320  size_t component_size = sizeof(int32_t);
321
322  // Having a range of 0 is legal
323  if (info->is_range && (elems > 0)) {
324    /*
325     * Bit of ugliness here.  We're going to generate a mem copy loop
326     * on the register range, but it is possible that some regs
327     * in the range have been promoted.  This is unlikely, but
328     * before generating the copy, we'll just force a flush
329     * of any regs in the source range that have been promoted to
330     * home location.
331     */
332    for (int i = 0; i < elems; i++) {
333      RegLocation loc = UpdateLoc(info->args[i]);
334      if (loc.location == kLocPhysReg) {
335        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
336      }
337    }
338    /*
339     * TUNING note: generated code here could be much improved, but
340     * this is an uncommon operation and isn't especially performance
341     * critical.
342     */
343    RegStorage r_src = AllocTemp();
344    RegStorage r_dst = AllocTemp();
345    RegStorage r_idx = AllocTemp();
346    RegStorage r_val;
347    switch (cu_->instruction_set) {
348      case kThumb2:
349        r_val = TargetReg(kLr);
350        break;
351      case kX86:
352        FreeTemp(TargetReg(kRet0));
353        r_val = AllocTemp();
354        break;
355      case kMips:
356        r_val = AllocTemp();
357        break;
358      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
359    }
360    // Set up source pointer
361    RegLocation rl_first = info->args[0];
362    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
363    // Set up the target pointer
364    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
365                mirror::Array::DataOffset(component_size).Int32Value());
366    // Set up the loop counter (known to be > 0)
367    LoadConstant(r_idx, elems - 1);
368    // Generate the copy loop.  Going backwards for convenience
369    LIR* target = NewLIR0(kPseudoTargetLabel);
370    // Copy next element
371    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
372    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
373    FreeTemp(r_val);
374    OpDecAndBranch(kCondGe, r_idx, target);
375    if (cu_->instruction_set == kX86) {
376      // Restore the target pointer
377      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
378                  -mirror::Array::DataOffset(component_size).Int32Value());
379    }
380  } else if (!info->is_range) {
381    // TUNING: interleave
382    for (int i = 0; i < elems; i++) {
383      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
384      StoreBaseDisp(TargetReg(kRet0),
385                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
386                    rl_arg.reg, kWord);
387      // If the LoadValue caused a temp to be allocated, free it
388      if (IsTemp(rl_arg.reg)) {
389        FreeTemp(rl_arg.reg);
390      }
391    }
392  }
393  if (info->result.location != kLocInvalid) {
394    StoreValue(info->result, GetReturn(false /* not fp */));
395  }
396}
397
398//
399// Slow path to ensure a class is initialized for sget/sput.
400//
401class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
402 public:
403  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
404                      RegStorage r_base) :
405    LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
406               storage_index_(storage_index), r_base_(r_base) {
407  }
408
409  void Compile() {
410    LIR* unresolved_target = GenerateTargetLabel();
411    uninit_->target = unresolved_target;
412    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
413                               storage_index_, true);
414    // Copy helper's result into r_base, a no-op on all but MIPS.
415    m2l_->OpRegCopy(r_base_,  m2l_->TargetReg(kRet0));
416
417    m2l_->OpUnconditionalBranch(cont_);
418  }
419
420 private:
421  LIR* const uninit_;
422  const int storage_index_;
423  const RegStorage r_base_;
424};
425
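// Static field store (sput).  On the fast path the storage base comes from the referrer's
// class or the dex cache (with a class-initialization slow path); otherwise call the runtime
// setter.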
426void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
427                      bool is_object) {
428  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
429  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
430  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
431    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
432    RegStorage r_base;
433    if (field_info.IsReferrersClass()) {
434      // Fast path, static storage base is this method's class
435      RegLocation rl_method  = LoadCurrMethod();
436      r_base = AllocTemp();
437      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
438      if (IsTemp(rl_method.reg)) {
439        FreeTemp(rl_method.reg);
440      }
441    } else {
442      // Medium path, static storage base in a different class which requires checks that the other
443      // class is initialized.
444      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
445      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
446      // May do runtime call so everything to home locations.
447      FlushAllRegs();
448      // Using fixed register to sync with possible call to runtime support.
449      RegStorage r_method = TargetReg(kArg1);
450      LockTemp(r_method);
451      LoadCurrMethodDirect(r_method);
452      r_base = TargetReg(kArg0);
453      LockTemp(r_base);
454      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
455      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
456                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
457      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
458      if (!field_info.IsInitialized() &&
459          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
460        // Check if r_base is NULL or a not yet initialized class.
461
462        // The slow path is invoked if r_base is NULL or the class pointed
463        // to by it is not initialized.
464        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
465        RegStorage r_tmp = TargetReg(kArg2);
466        LockTemp(r_tmp);
467        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
468                                          mirror::Class::StatusOffset().Int32Value(),
469                                          mirror::Class::kStatusInitialized, NULL);
470        LIR* cont = NewLIR0(kPseudoTargetLabel);
471
472        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
473                                                     field_info.StorageIndex(), r_base));
474
475        FreeTemp(r_tmp);
476      }
477      FreeTemp(r_method);
478    }
479    // r_base now holds static storage base
480    if (is_long_or_double) {
481      rl_src = LoadValueWide(rl_src, kAnyReg);
482    } else {
483      rl_src = LoadValue(rl_src, kAnyReg);
484    }
485    if (field_info.IsVolatile()) {
486      // There might have been a store before this volatile one so insert StoreStore barrier.
487      GenMemBarrier(kStoreStore);
488    }
489    if (is_long_or_double) {
490      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
491    } else {
492      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
493    }
494    if (field_info.IsVolatile()) {
495      // A load might follow the volatile store so insert a StoreLoad barrier.
496      GenMemBarrier(kStoreLoad);
497    }
498    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
499      MarkGCCard(rl_src.reg, r_base);
500    }
501    FreeTemp(r_base);
502  } else {
503    FlushAllRegs();  // Everything to home locations
504    ThreadOffset<4> setter_offset =
505        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Static)
506                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjStatic)
507                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Static));
508    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
509  }
510}
511
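// Static field load (sget).  Mirrors GenSput: resolve the storage base on the fast path or
// fall back to the runtime getter.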
512void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
513                      bool is_long_or_double, bool is_object) {
514  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
515  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
516  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
517    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
518    RegStorage r_base;
519    if (field_info.IsReferrersClass()) {
520      // Fast path, static storage base is this method's class
521      RegLocation rl_method  = LoadCurrMethod();
522      r_base = AllocTemp();
523      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
524    } else {
525      // Medium path, static storage base in a different class which requires checks that the other
526      // class is initialized
527      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
528      // May do runtime call so everything to home locations.
529      FlushAllRegs();
530      // Using fixed register to sync with possible call to runtime support.
531      RegStorage r_method = TargetReg(kArg1);
532      LockTemp(r_method);
533      LoadCurrMethodDirect(r_method);
534      r_base = TargetReg(kArg0);
535      LockTemp(r_base);
536      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
537      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
538                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
539      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
540      if (!field_info.IsInitialized() &&
541          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
542        // Check if r_base is NULL or a not yet initialized class.
543
544        // The slow path is invoked if r_base is NULL or the class pointed
545        // to by it is not initialized.
546        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
547        RegStorage r_tmp = TargetReg(kArg2);
548        LockTemp(r_tmp);
549        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
550                                          mirror::Class::StatusOffset().Int32Value(),
551                                          mirror::Class::kStatusInitialized, NULL);
552        LIR* cont = NewLIR0(kPseudoTargetLabel);
553
554        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
555                                                     field_info.StorageIndex(), r_base));
556
557        FreeTemp(r_tmp);
558      }
559      FreeTemp(r_method);
560    }
561    // r_base now holds static storage base
562    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
563
564    if (is_long_or_double) {
565      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
566    } else {
567      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
568    }
569    FreeTemp(r_base);
570
571    if (field_info.IsVolatile()) {
572      // Without context sensitive analysis, we must issue the most conservative barriers.
573      // In this case, either a load or store may follow so we issue both barriers.
574      GenMemBarrier(kLoadLoad);
575      GenMemBarrier(kLoadStore);
576    }
577
578    if (is_long_or_double) {
579      StoreValueWide(rl_dest, rl_result);
580    } else {
581      StoreValue(rl_dest, rl_result);
582    }
583  } else {
584    FlushAllRegs();  // Everything to home locations
585    ThreadOffset<4> getterOffset =
586        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Static)
587                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
588                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Static));
589    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
590    if (is_long_or_double) {
591      RegLocation rl_result = GetReturnWide(rl_dest.fp);
592      StoreValueWide(rl_dest, rl_result);
593    } else {
594      RegLocation rl_result = GetReturn(rl_dest.fp);
595      StoreValue(rl_dest, rl_result);
596    }
597  }
598}
599
600// Generate code for all slow paths.
601void Mir2Lir::HandleSlowPaths() {
602  int n = slow_paths_.Size();
603  for (int i = 0; i < n; ++i) {
604    LIRSlowPath* slowpath = slow_paths_.Get(i);
605    slowpath->Compile();
606  }
607  slow_paths_.Reset();
608}
609
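// Materialize the deferred suspend-check launch pads: each one calls pTestSuspend and then
// branches back to its resume label.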
610void Mir2Lir::HandleSuspendLaunchPads() {
611  int num_elems = suspend_launchpads_.Size();
612  ThreadOffset<4> helper_offset = QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend);
613  for (int i = 0; i < num_elems; i++) {
614    ResetRegPool();
615    ResetDefTracking();
616    LIR* lab = suspend_launchpads_.Get(i);
617    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
618    current_dalvik_offset_ = lab->operands[1];
619    AppendLIR(lab);
620    RegStorage r_tgt = CallHelperSetup(helper_offset);
621    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
622    OpUnconditionalBranch(resume_lab);
623  }
624}
625
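// Materialize the deferred throw launch pads: marshal the recorded operands into argument
// registers as needed and call the matching Quick throw entrypoint.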
626void Mir2Lir::HandleThrowLaunchPads() {
627  int num_elems = throw_launchpads_.Size();
628  for (int i = 0; i < num_elems; i++) {
629    ResetRegPool();
630    ResetDefTracking();
631    LIR* lab = throw_launchpads_.Get(i);
632    current_dalvik_offset_ = lab->operands[1];
633    AppendLIR(lab);
634    ThreadOffset<4> func_offset(-1);
635    int v1 = lab->operands[2];
636    int v2 = lab->operands[3];
637    const bool target_x86 = cu_->instruction_set == kX86;
638    switch (lab->operands[0]) {
639      case kThrowNullPointer:
640        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer);
641        break;
642      case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
643        // On x86, v1 instead holds the array pointer, so the length is reloaded from it below.
644        if (target_x86) {
645          OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
646                   mirror::Array::LengthOffset().Int32Value());
647        } else {
648          OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
649        }
650        // Make sure the following LoadConstant doesn't mess with kArg1.
651        LockTemp(TargetReg(kArg1));
652        LoadConstant(TargetReg(kArg0), v2);
653        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
654        break;
655      case kThrowArrayBounds:
656        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
657        if (v2 != TargetReg(kArg0).GetReg()) {
658          OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
659          if (target_x86) {
660            // x86 leaves the array pointer in v2, so load the array length that the handler expects
661            OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
662                     mirror::Array::LengthOffset().Int32Value());
663          } else {
664            OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
665          }
666        } else {
667          if (v1 == TargetReg(kArg1).GetReg()) {
668            // Swap v1 and v2, using kArg2 as a temp
669            OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
670            if (target_x86) {
671              // x86 leaves the array pointer in v2; load the array length that the handler expects
672              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
673                       mirror::Array::LengthOffset().Int32Value());
674            } else {
675              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
676            }
677            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
678          } else {
679            if (target_x86) {
680              // x86 leaves the array pointer in v2; load the array length that the handler expects
681              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
682                       mirror::Array::LengthOffset().Int32Value());
683            } else {
684              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
685            }
686            OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
687          }
688        }
689        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
690        break;
691      case kThrowDivZero:
692        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero);
693        break;
694      case kThrowNoSuchMethod:
695        OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
696        func_offset =
697          QUICK_ENTRYPOINT_OFFSET(4, pThrowNoSuchMethod);
698        break;
699      default:
700        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
701    }
702    ClobberCallerSave();
703    RegStorage r_tgt = CallHelperSetup(func_offset);
704    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
705  }
706}
707
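// Instance field load (iget): inline load with null check on the fast path, runtime getter on
// the slow path.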
708void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
709                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
710                      bool is_object) {
711  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
712  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
713  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
714    RegLocation rl_result;
715    RegisterClass reg_class = oat_reg_class_by_size(size);
716    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
717    rl_obj = LoadValue(rl_obj, kCoreReg);
718    if (is_long_or_double) {
719      DCHECK(rl_dest.wide);
720      GenNullCheck(rl_obj.reg, opt_flags);
721      if (cu_->instruction_set == kX86) {
722        rl_result = EvalLoc(rl_dest, reg_class, true);
723        // FIXME?  duplicate null check?
724        GenNullCheck(rl_obj.reg, opt_flags);
725        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
726                         rl_obj.s_reg_low);
727        MarkPossibleNullPointerException(opt_flags);
728        if (field_info.IsVolatile()) {
729          // Without context sensitive analysis, we must issue the most conservative barriers.
730          // In this case, either a load or store may follow so we issue both barriers.
731          GenMemBarrier(kLoadLoad);
732          GenMemBarrier(kLoadStore);
733        }
734      } else {
735        RegStorage reg_ptr = AllocTemp();
736        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
737        rl_result = EvalLoc(rl_dest, reg_class, true);
738        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
739        MarkPossibleNullPointerException(opt_flags);
740        if (field_info.IsVolatile()) {
741          // Without context sensitive analysis, we must issue the most conservative barriers.
742          // In this case, either a load or store may follow so we issue both barriers.
743          GenMemBarrier(kLoadLoad);
744          GenMemBarrier(kLoadStore);
745        }
746        FreeTemp(reg_ptr);
747      }
748      StoreValueWide(rl_dest, rl_result);
749    } else {
750      rl_result = EvalLoc(rl_dest, reg_class, true);
751      GenNullCheck(rl_obj.reg, opt_flags);
752      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
753                   rl_obj.s_reg_low);
754      MarkPossibleNullPointerException(opt_flags);
755      if (field_info.IsVolatile()) {
756        // Without context sensitive analysis, we must issue the most conservative barriers.
757        // In this case, either a load or store may follow so we issue both barriers.
758        GenMemBarrier(kLoadLoad);
759        GenMemBarrier(kLoadStore);
760      }
761      StoreValue(rl_dest, rl_result);
762    }
763  } else {
764    ThreadOffset<4> getterOffset =
765        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Instance)
766                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjInstance)
767                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Instance));
768    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
769    if (is_long_or_double) {
770      RegLocation rl_result = GetReturnWide(rl_dest.fp);
771      StoreValueWide(rl_dest, rl_result);
772    } else {
773      RegLocation rl_result = GetReturn(rl_dest.fp);
774      StoreValue(rl_dest, rl_result);
775    }
776  }
777}
778
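// Instance field store (iput): inline store with null check on the fast path, runtime setter
// on the slow path.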
779void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
780                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
781                      bool is_object) {
782  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
783  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
784  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
785    RegisterClass reg_class = oat_reg_class_by_size(size);
786    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
787    rl_obj = LoadValue(rl_obj, kCoreReg);
788    if (is_long_or_double) {
789      rl_src = LoadValueWide(rl_src, kAnyReg);
790      GenNullCheck(rl_obj.reg, opt_flags);
791      RegStorage reg_ptr = AllocTemp();
792      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
793      if (field_info.IsVolatile()) {
794        // There might have been a store before this volatile one so insert StoreStore barrier.
795        GenMemBarrier(kStoreStore);
796      }
797      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
798      MarkPossibleNullPointerException(opt_flags);
799      if (field_info.IsVolatile()) {
800        // A load might follow the volatile store so insert a StoreLoad barrier.
801        GenMemBarrier(kStoreLoad);
802      }
803      FreeTemp(reg_ptr);
804    } else {
805      rl_src = LoadValue(rl_src, reg_class);
806      GenNullCheck(rl_obj.reg, opt_flags);
807      if (field_info.IsVolatile()) {
808        // There might have been a store before this volatile one so insert StoreStore barrier.
809        GenMemBarrier(kStoreStore);
810      }
811      StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
812      MarkPossibleNullPointerException(opt_flags);
813      if (field_info.IsVolatile()) {
814        // A load might follow the volatile store so insert a StoreLoad barrier.
815        GenMemBarrier(kStoreLoad);
816      }
817      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
818        MarkGCCard(rl_src.reg, rl_obj.reg);
819      }
820    }
821  } else {
822    ThreadOffset<4> setter_offset =
823        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Instance)
824                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjInstance)
825                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Instance));
826    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
827                                               rl_obj, rl_src, true);
828  }
829}
830
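// aput-object: call a runtime helper that performs whichever null and bounds checks are still
// needed, plus the object store check.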
831void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
832                             RegLocation rl_src) {
833  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
834  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
835      (opt_flags & MIR_IGNORE_NULL_CHECK));
836  ThreadOffset<4> helper = needs_range_check
837      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithNullAndBoundCheck)
838                          : QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithBoundCheck))
839      : QUICK_ENTRYPOINT_OFFSET(4, pAputObject);
840  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
841}
842
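// const-class: load the class from the dex cache, calling out to resolve it (and verify access
// if needed) on the slow path.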
843void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
844  RegLocation rl_method = LoadCurrMethod();
845  RegStorage res_reg = AllocTemp();
846  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
847  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
848                                                   *cu_->dex_file,
849                                                   type_idx)) {
850    // Call out to helper which resolves type and verifies access.
851    // Resolved type returned in kRet0.
852    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
853                            type_idx, rl_method.reg, true);
854    RegLocation rl_result = GetReturn(false);
855    StoreValue(rl_dest, rl_result);
856  } else {
857    // We don't need access checks; load type from dex cache
858    int32_t dex_cache_offset =
859        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
860    LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
861    int32_t offset_of_type =
862        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
863                          * type_idx);
864    LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
865    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
866        type_idx) || SLOW_TYPE_PATH) {
867      // Slow path, at runtime test if type is null and if so initialize
868      FlushAllRegs();
869      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
870      LIR* cont = NewLIR0(kPseudoTargetLabel);
871
872      // Object to generate the slow path for class resolution.
873      class SlowPath : public LIRSlowPath {
874       public:
875        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
876                 const RegLocation& rl_method, const RegLocation& rl_result) :
877                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
878                   rl_method_(rl_method), rl_result_(rl_result) {
879        }
880
881        void Compile() {
882          GenerateTargetLabel();
883
884          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
885                                        rl_method_.reg, true);
886          m2l_->OpRegCopy(rl_result_.reg,  m2l_->TargetReg(kRet0));
887
888          m2l_->OpUnconditionalBranch(cont_);
889        }
890
891       private:
892        const int type_idx_;
893        const RegLocation rl_method_;
894        const RegLocation rl_result_;
895      };
896
897      // Add to list for future.
898      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));
899
900      StoreValue(rl_dest, rl_result);
901    } else {
902      // Fast path, we're done - just store result
903      StoreValue(rl_dest, rl_result);
904    }
905  }
906}
907
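// const-string: load the String from the dex cache, calling pResolveString on the slow path
// when it may not be resolved yet.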
908void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
909  /* NOTE: Most strings should be available at compile time */
910  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
911                 (sizeof(mirror::String*) * string_idx);
912  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
913      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
914    // slow path, resolve string if not in dex cache
915    FlushAllRegs();
916    LockCallTemps();  // Using explicit registers
917
918    // If the Method* is already in a register, we can save a copy.
919    RegLocation rl_method = mir_graph_->GetMethodLoc();
920    RegStorage r_method;
921    if (rl_method.location == kLocPhysReg) {
922      // A temp would conflict with register use below.
923      DCHECK(!IsTemp(rl_method.reg));
924      r_method = rl_method.reg;
925    } else {
926      r_method = TargetReg(kArg2);
927      LoadCurrMethodDirect(r_method);
928    }
929    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
930                 TargetReg(kArg0));
931
932    // Might call out to helper, which will return resolved string in kRet0
933    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
934    if (cu_->instruction_set == kThumb2 ||
935        cu_->instruction_set == kMips) {
936      //  OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
937      LoadConstant(TargetReg(kArg1), string_idx);
938      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
939      LIR* cont = NewLIR0(kPseudoTargetLabel);
940      GenBarrier();
941
942      // Object to generate the slow path for string resolution.
943      class SlowPath : public LIRSlowPath {
944       public:
945        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
946          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
947        }
948
949        void Compile() {
950          GenerateTargetLabel();
951
952          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pResolveString));
953
954          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);   // .eq
955          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
956          m2l_->MarkSafepointPC(call_inst);
957          m2l_->FreeTemp(r_tgt);
958
959          m2l_->OpUnconditionalBranch(cont_);
960        }
961
962       private:
963         RegStorage r_method_;
964      };
965
966      // Add to list for future.
967      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
968    } else {
969      DCHECK_EQ(cu_->instruction_set, kX86);
970      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
971      LoadConstant(TargetReg(kArg1), string_idx);
972      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method, TargetReg(kArg1),
973                              true);
974      LIR* target = NewLIR0(kPseudoTargetLabel);
975      branch->target = target;
976    }
977    GenBarrier();
978    StoreValue(rl_dest, GetReturn(false));
979  } else {
980    RegLocation rl_method = LoadCurrMethod();
981    RegStorage res_reg = AllocTemp();
982    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
983    LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
984    LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
985    StoreValue(rl_dest, rl_result);
986  }
987}
988
989/*
990 * Let helper function take care of everything.  Will
991 * call Class::NewInstanceFromCode(type_idx, method);
992 */
993void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
994  FlushAllRegs();  /* Everything to home location */
995  // Alloc will always check for resolution; do we also need to verify
996  // access because the verifier was unable to?
997  ThreadOffset<4> func_offset(-1);
998  const DexFile* dex_file = cu_->dex_file;
999  CompilerDriver* driver = cu_->compiler_driver;
1000  if (driver->CanAccessInstantiableTypeWithoutChecks(
1001      cu_->method_idx, *dex_file, type_idx)) {
1002    bool is_type_initialized;
1003    bool use_direct_type_ptr;
1004    uintptr_t direct_type_ptr;
1005    if (kEmbedClassInCode &&
1006        driver->CanEmbedTypeInCode(*dex_file, type_idx,
1007                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
1008      // The fast path.
1009      if (!use_direct_type_ptr) {
1010        LoadClassType(type_idx, kArg0);
1011        if (!is_type_initialized) {
1012          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
1013          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
1014        } else {
1015          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
1016          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
1017        }
1018      } else {
1019        // Use the direct pointer.
1020        if (!is_type_initialized) {
1021          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
1022          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
1023        } else {
1024          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
1025          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
1026        }
1027      }
1028    } else {
1029      // The slow path.
1030      DCHECK_EQ(func_offset.Int32Value(), -1);
1031      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObject);
1032      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
1033    }
1034    DCHECK_NE(func_offset.Int32Value(), -1);
1035  } else {
1036    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectWithAccessCheck);
1037    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
1038  }
1039  RegLocation rl_result = GetReturn(false);
1040  StoreValue(rl_dest, rl_result);
1041}
1042
1043void Mir2Lir::GenThrow(RegLocation rl_src) {
1044  FlushAllRegs();
1045  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
1046}
1047
1048// For final classes there are no sub-classes to check and so we can answer the instance-of
1049// question with simple comparisons.
1050void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
1051                                 RegLocation rl_src) {
1052  // X86 has its own implementation.
1053  DCHECK_NE(cu_->instruction_set, kX86);
1054
1055  RegLocation object = LoadValue(rl_src, kCoreReg);
1056  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1057  RegStorage result_reg = rl_result.reg;
1058  if (result_reg == object.reg) {
1059    result_reg = AllocTypedTemp(false, kCoreReg);
1060  }
1061  LoadConstant(result_reg, 0);     // assume false
1062  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
1063
1064  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
1065  RegStorage object_class = AllocTypedTemp(false, kCoreReg);
1066
1067  LoadCurrMethodDirect(check_class);
1068  if (use_declaring_class) {
1069    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
1070    LoadWordDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
1071  } else {
1072    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1073                 check_class);
1074    LoadWordDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
1075    int32_t offset_of_type =
1076      mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
1077      (sizeof(mirror::Class*) * type_idx);
1078    LoadWordDisp(check_class, offset_of_type, check_class);
1079  }
1080
1081  LIR* ne_branchover = NULL;
1082  if (cu_->instruction_set == kThumb2) {
1083    OpRegReg(kOpCmp, check_class, object_class);  // Same?
1084    OpIT(kCondEq, "");   // if-convert the test
1085    LoadConstant(result_reg, 1);     // .eq case - load true
1086    GenBarrier();
1087  } else {
1088    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
1089    LoadConstant(result_reg, 1);     // eq case - load true
1090  }
1091  LIR* target = NewLIR0(kPseudoTargetLabel);
1092  null_branchover->target = target;
1093  if (ne_branchover != NULL) {
1094    ne_branchover->target = target;
1095  }
1096  FreeTemp(object_class);
1097  FreeTemp(check_class);
1098  if (IsTemp(result_reg)) {
1099    OpRegCopy(rl_result.reg, result_reg);
1100    FreeTemp(result_reg);
1101  }
1102  StoreValue(rl_dest, rl_result);
1103}
1104
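// General instance-of: resolve the check class into kArg2, compare it with the object's class
// and fall back to pInstanceofNonTrivial when a simple equality test is not sufficient.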
1105void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
1106                                         bool type_known_abstract, bool use_declaring_class,
1107                                         bool can_assume_type_is_in_dex_cache,
1108                                         uint32_t type_idx, RegLocation rl_dest,
1109                                         RegLocation rl_src) {
1110  // X86 has its own implementation.
1111  DCHECK_NE(cu_->instruction_set, kX86);
1112
1113  FlushAllRegs();
1114  // May generate a call - use explicit registers
1115  LockCallTemps();
1116  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
1117  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
1118  if (needs_access_check) {
1119    // Check we have access to type_idx and if not throw IllegalAccessError,
1120    // returns Class* in kArg0
1121    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
1122                         type_idx, true);
1123    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
1124    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1125  } else if (use_declaring_class) {
1126    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1127    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
1128                 class_reg);
1129  } else {
1130    // Load dex cache entry into class_reg (kArg2)
1131    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1132    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1133                 class_reg);
1134    int32_t offset_of_type =
1135        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
1136        * type_idx);
1137    LoadWordDisp(class_reg, offset_of_type, class_reg);
1138    if (!can_assume_type_is_in_dex_cache) {
1139      // Need to test presence of type in dex cache at runtime
1140      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
1141      // Not resolved
1142      // Call out to helper, which will return resolved type in kRet0
1143      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
1144      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
1145      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
1146      // Rejoin code paths
1147      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
1148      hop_branch->target = hop_target;
1149    }
1150  }
1151  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
1152  RegLocation rl_result = GetReturn(false);
1153  if (cu_->instruction_set == kMips) {
1154    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
1155    LoadConstant(rl_result.reg, 0);
1156  }
1157  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
1158
1159  /* load object->klass_ */
1160  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1161  LoadWordDisp(TargetReg(kArg0),  mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
1162  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
1163  LIR* branchover = NULL;
1164  if (type_known_final) {
1165    // rl_result == ref == null == 0.
1166    if (cu_->instruction_set == kThumb2) {
1167      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
1168      OpIT(kCondEq, "E");   // if-convert the test
1169      LoadConstant(rl_result.reg, 1);     // .eq case - load true
1170      LoadConstant(rl_result.reg, 0);     // .ne case - load false
1171      GenBarrier();
1172    } else {
1173      LoadConstant(rl_result.reg, 0);     // ne case - load false
1174      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
1175      LoadConstant(rl_result.reg, 1);     // eq case - load true
1176    }
1177  } else {
1178    if (cu_->instruction_set == kThumb2) {
1179      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
1180      if (!type_known_abstract) {
1181        /* Uses conditional nullification */
1182        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
1183        OpIT(kCondEq, "EE");   // if-convert the test
1184        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
1185      }
1186      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
1187      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1188      GenBarrier();
1189      FreeTemp(r_tgt);
1190    } else {
1191      if (!type_known_abstract) {
1192        /* Uses branchovers */
1193        LoadConstant(rl_result.reg, 1);     // assume true
1194        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
1195      }
1196      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
1197      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
1198      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1199      FreeTemp(r_tgt);
1200    }
1201  }
1202  // TODO: only clobber when type isn't final?
1203  ClobberCallerSave();
1204  /* branch targets here */
1205  LIR* target = NewLIR0(kPseudoTargetLabel);
1206  StoreValue(rl_dest, rl_result);
1207  branch1->target = target;
1208  if (branchover != NULL) {
1209    branchover->target = target;
1210  }
1211}
1212
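// instance-of: dispatch to the trivial final-class comparison or to the general helper path.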
1213void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
1214  bool type_known_final, type_known_abstract, use_declaring_class;
1215  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1216                                                                              *cu_->dex_file,
1217                                                                              type_idx,
1218                                                                              &type_known_final,
1219                                                                              &type_known_abstract,
1220                                                                              &use_declaring_class);
1221  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
1222      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
1223
1224  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
1225    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
1226  } else {
1227    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
1228                               use_declaring_class, can_assume_type_is_in_dex_cache,
1229                               type_idx, rl_dest, rl_src);
1230  }
1231}
1232
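// check-cast: a no-op when the verifier proved the cast safe; otherwise resolve the target
// class and call pCheckCast from a slow path when the classes differ.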
1233void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
1234  bool type_known_final, type_known_abstract, use_declaring_class;
1235  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1236                                                                              *cu_->dex_file,
1237                                                                              type_idx,
1238                                                                              &type_known_final,
1239                                                                              &type_known_abstract,
1240                                                                              &use_declaring_class);
1241  // Note: currently type_known_final is unused, as optimizing will only improve the performance
1242  // of the exception throw path.
1243  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
1244  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
1245    // Verifier type analysis proved this check cast would never cause an exception.
1246    return;
1247  }
1248  FlushAllRegs();
1249  // May generate a call - use explicit registers
1250  LockCallTemps();
1251  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
1252  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
1253  if (needs_access_check) {
1254    // Check we have access to type_idx and if not throw IllegalAccessError,
1255    // returns Class* in kRet0
1256    // InitializeTypeAndVerifyAccess(idx, method)
1257    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
1258                            type_idx, TargetReg(kArg1), true);
1259    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
1260  } else if (use_declaring_class) {
1261    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
1262                 class_reg);
1263  } else {
1264    // Load dex cache entry into class_reg (kArg2)
1265    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1266                 class_reg);
1267    int32_t offset_of_type =
1268        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
1269        (sizeof(mirror::Class*) * type_idx);
1270    LoadWordDisp(class_reg, offset_of_type, class_reg);
1271    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
1272      // Need to test presence of type in dex cache at runtime
1273      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
1274      LIR* cont = NewLIR0(kPseudoTargetLabel);
1275
1276      // Slow path to initialize the type.  Executed if the type is NULL.
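      // (Slow-path code registered via AddSlowPath is compiled out of line, after the
      // main method body, so only the null compare-and-branch above remains in the
      // fast path.)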
1277      class SlowPath : public LIRSlowPath {
1278       public:
1279        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
1280                 const RegStorage class_reg) :
1281                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
1282                   class_reg_(class_reg) {
1283        }
1284
1285        void Compile() {
1286          GenerateTargetLabel();
1287
1288          // Call out to helper, which will return the resolved type in kRet0
1289          // InitializeTypeFromCode(idx, method)
1290          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
1291                                        m2l_->TargetReg(kArg1), true);
1292          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
1293          m2l_->OpUnconditionalBranch(cont_);
1294        }
1295       private:
1296        const int type_idx_;
1297        const RegStorage class_reg_;
1298      };
1299
1300      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
1301    }
1302  }
1303  // At this point, class_reg (kArg2) holds the Class*.
1304  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1305
1306  // Slow path for the case where the classes are not equal.  In this case we need
1307  // to call a helper function to do the check.
1308  class SlowPath : public LIRSlowPath {
1309   public:
1310    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
1311               LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
1312    }
1313
1314    void Compile() {
1315      GenerateTargetLabel();
1316
1317      if (load_) {
1318        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
1319                           m2l_->TargetReg(kArg1));
1320      }
1321      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
1322                                    m2l_->TargetReg(kArg1), true);
1323
1324      m2l_->OpUnconditionalBranch(cont_);
1325    }
1326
1327   private:
1328    bool load_;
1329  };
1330
1331  if (type_known_abstract) {
1332    // Easier case, run slow path if target is non-null (slow path will load from target)
1333    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
1334    LIR* cont = NewLIR0(kPseudoTargetLabel);
1335    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
1336  } else {
1337    // Harder, more common case.  We need to generate a forward branch over the load
1338    // if the target is null.  If it's non-null we perform the load and branch to the
1339    // slow path if the classes are not equal.
1340
1341    /* Null is OK - continue */
1342    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
1343    /* load object->klass_ */
1344    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1345    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
1346
1347    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
1348    LIR* cont = NewLIR0(kPseudoTargetLabel);
1349
1350    // Add the slow path; it does not perform the load since that has already been done.
1351    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));
1352
1353    // Set the null check to branch to the continuation.
1354    branch1->target = cont;
1355  }
1356}
1357
1358void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
1359                           RegLocation rl_src1, RegLocation rl_src2) {
1360  RegLocation rl_result;
1361  if (cu_->instruction_set == kThumb2) {
1362    /*
1363     * NOTE:  This is the one place in the code in which we might have
1364     * as many as six live temporary registers.  There are five in the normal
1365     * set for Arm.  Until we have spill capabilities, temporarily add
1366     * lr to the temp set.  It is safe to do this locally, but note that
1367     * lr is used explicitly elsewhere in the code generator and cannot
1368     * normally be used as a general temp register.
1369     */
1370    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
1371    FreeTemp(TargetReg(kLr));   // and make it available
1372  }
1373  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
1374  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1375  rl_result = EvalLoc(rl_dest, kCoreReg, true);
1376  // The longs may overlap - use intermediate temp if so
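  // (Example of the hazard: if rl_result's low register is the same physical
  // register as rl_src1's high register, writing the low result first would
  // corrupt a source of the second op, so a temp is used below.)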
1377  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
1378    RegStorage t_reg = AllocTemp();
1379    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1380    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1381    OpRegCopy(rl_result.reg.GetLow(), t_reg);
1382    FreeTemp(t_reg);
1383  } else {
1384    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1385    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1386  }
1387  /*
1388   * NOTE: If rl_dest refers to a frame variable in a large frame, the
1389   * following StoreValueWide might need to allocate a temp register.
1390   * To further work around the lack of a spill capability, explicitly
1391   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
1392   * Remove when spill is functional.
1393   */
1394  FreeRegLocTemps(rl_result, rl_src1);
1395  FreeRegLocTemps(rl_result, rl_src2);
1396  StoreValueWide(rl_dest, rl_result);
1397  if (cu_->instruction_set == kThumb2) {
1398    Clobber(TargetReg(kLr));
1399    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
1400  }
1401}
1402
1403
1404void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
1405                             RegLocation rl_src1, RegLocation rl_shift) {
1406  ThreadOffset<4> func_offset(-1);
1407
1408  switch (opcode) {
1409    case Instruction::SHL_LONG:
1410    case Instruction::SHL_LONG_2ADDR:
1411      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShlLong);
1412      break;
1413    case Instruction::SHR_LONG:
1414    case Instruction::SHR_LONG_2ADDR:
1415      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShrLong);
1416      break;
1417    case Instruction::USHR_LONG:
1418    case Instruction::USHR_LONG_2ADDR:
1419      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pUshrLong);
1420      break;
1421    default:
1422      LOG(FATAL) << "Unexpected case in GenShiftOpLong: " << opcode;
1423  }
1424  FlushAllRegs();   /* Send everything to home location */
1425  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
1426  RegLocation rl_result = GetReturnWide(false);
1427  StoreValueWide(rl_dest, rl_result);
1428}
1429
1430
1431void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
1432                            RegLocation rl_src1, RegLocation rl_src2) {
1433  DCHECK_NE(cu_->instruction_set, kX86);
1434  OpKind op = kOpBkpt;
1435  bool is_div_rem = false;
1436  bool check_zero = false;
1437  bool unary = false;
1438  RegLocation rl_result;
1439  bool shift_op = false;
1440  switch (opcode) {
1441    case Instruction::NEG_INT:
1442      op = kOpNeg;
1443      unary = true;
1444      break;
1445    case Instruction::NOT_INT:
1446      op = kOpMvn;
1447      unary = true;
1448      break;
1449    case Instruction::ADD_INT:
1450    case Instruction::ADD_INT_2ADDR:
1451      op = kOpAdd;
1452      break;
1453    case Instruction::SUB_INT:
1454    case Instruction::SUB_INT_2ADDR:
1455      op = kOpSub;
1456      break;
1457    case Instruction::MUL_INT:
1458    case Instruction::MUL_INT_2ADDR:
1459      op = kOpMul;
1460      break;
1461    case Instruction::DIV_INT:
1462    case Instruction::DIV_INT_2ADDR:
1463      check_zero = true;
1464      op = kOpDiv;
1465      is_div_rem = true;
1466      break;
1467    /* NOTE: returns in kArg1 */
1468    case Instruction::REM_INT:
1469    case Instruction::REM_INT_2ADDR:
1470      check_zero = true;
1471      op = kOpRem;
1472      is_div_rem = true;
1473      break;
1474    case Instruction::AND_INT:
1475    case Instruction::AND_INT_2ADDR:
1476      op = kOpAnd;
1477      break;
1478    case Instruction::OR_INT:
1479    case Instruction::OR_INT_2ADDR:
1480      op = kOpOr;
1481      break;
1482    case Instruction::XOR_INT:
1483    case Instruction::XOR_INT_2ADDR:
1484      op = kOpXor;
1485      break;
1486    case Instruction::SHL_INT:
1487    case Instruction::SHL_INT_2ADDR:
1488      shift_op = true;
1489      op = kOpLsl;
1490      break;
1491    case Instruction::SHR_INT:
1492    case Instruction::SHR_INT_2ADDR:
1493      shift_op = true;
1494      op = kOpAsr;
1495      break;
1496    case Instruction::USHR_INT:
1497    case Instruction::USHR_INT_2ADDR:
1498      shift_op = true;
1499      op = kOpLsr;
1500      break;
1501    default:
1502      LOG(FATAL) << "Invalid word arith op: " << opcode;
1503  }
1504  if (!is_div_rem) {
1505    if (unary) {
1506      rl_src1 = LoadValue(rl_src1, kCoreReg);
1507      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1508      OpRegReg(op, rl_result.reg, rl_src1.reg);
1509    } else {
1510      if (shift_op) {
1511        rl_src2 = LoadValue(rl_src2, kCoreReg);
1512        RegStorage t_reg = AllocTemp();
1513        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
1514        rl_src1 = LoadValue(rl_src1, kCoreReg);
1515        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1516        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
1517        FreeTemp(t_reg);
1518      } else {
1519        rl_src1 = LoadValue(rl_src1, kCoreReg);
1520        rl_src2 = LoadValue(rl_src2, kCoreReg);
1521        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1522        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
1523      }
1524    }
1525    StoreValue(rl_dest, rl_result);
1526  } else {
1527    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
1528    if (cu_->instruction_set == kMips) {
1529      rl_src1 = LoadValue(rl_src1, kCoreReg);
1530      rl_src2 = LoadValue(rl_src2, kCoreReg);
1531      if (check_zero) {
1532        GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
1533      }
1534      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1535      done = true;
1536    } else if (cu_->instruction_set == kThumb2) {
1537      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
1538        // Use ARM SDIV instruction for division.  For remainder we also need to
1539        // calculate using a MUL and subtract.
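        // (That is, rem = src1 - (src1 / src2) * src2; the exact instruction
        // sequence is chosen by the target-specific GenDivRem.)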
1540        rl_src1 = LoadValue(rl_src1, kCoreReg);
1541        rl_src2 = LoadValue(rl_src2, kCoreReg);
1542        if (check_zero) {
1543          GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
1544        }
1545        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1546        done = true;
1547      }
1548    }
1549
1550    // If we haven't already generated the code use the callout function.
1551    if (!done) {
1552      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
1553      FlushAllRegs();   /* Send everything to home location */
1554      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
1555      RegStorage r_tgt = CallHelperSetup(func_offset);
1556      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
1557      if (check_zero) {
1558        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
1559      }
1560      // NOTE: callout here is not a safepoint.
1561      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
1562      if (op == kOpDiv)
1563        rl_result = GetReturn(false);
1564      else
1565        rl_result = GetReturnAlt();
1566    }
1567    StoreValue(rl_dest, rl_result);
1568  }
1569}
1570
1571/*
1572 * The following are the first-level codegen routines that analyze the format
1573 * of each bytecode then either dispatch special purpose codegen routines
1574 * or produce corresponding Thumb instructions directly.
1575 */
1576
1577// Returns true if no more than two bits are set in 'x'.
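// For example, x = 0b0110 (two bits): x &= x - 1 leaves 0b0100, and
// (0b0100 & 0b0011) == 0, so the test passes; x = 0b0111 (three bits) leaves
// 0b0110, and (0b0110 & 0b0101) != 0, so it fails.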
1578static bool IsPopCountLE2(unsigned int x) {
1579  x &= x - 1;
1580  return (x & (x - 1)) == 0;
1581}
1582
1583// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
1584// and store the result in 'rl_dest'.
1585bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
1586                               RegLocation rl_src, RegLocation rl_dest, int lit) {
1587  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
1588    return false;
1589  }
1590  // No divide instruction for Arm, so check for more special cases
1591  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
1592    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
1593  }
1594  int k = LowestSetBit(lit);
1595  if (k >= 30) {
1596    // Avoid special cases.
1597    return false;
1598  }
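  // At this point lit is a power of two (>= 2) and k = LowestSetBit(lit) < 30.
  // Sketch of the arithmetic emitted below (the exact instructions are target-specific):
  //   bias = (src < 0) ? (lit - 1) : 0   // derived from the sign bit
  //   div:  dst = (src + bias) >> k      // arithmetic shift, rounds toward zero
  //   rem:  dst = ((src + bias) & (lit - 1)) - bias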
1599  rl_src = LoadValue(rl_src, kCoreReg);
1600  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1601  if (is_div) {
1602    RegStorage t_reg = AllocTemp();
1603    if (lit == 2) {
1604      // Division by 2 is by far the most common division by constant.
1605      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
1606      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1607      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1608    } else {
1609      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
1610      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
1611      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1612      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1613    }
1614  } else {
1615    RegStorage t_reg1 = AllocTemp();
1616    RegStorage t_reg2 = AllocTemp();
1617    if (lit == 2) {
1618      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
1619      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1620      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1621      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1622    } else {
1623      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
1624      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
1625      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1626      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1627      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1628    }
1629  }
1630  StoreValue(rl_dest, rl_result);
1631  return true;
1632}
1633
1634// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
1635// and store the result in 'rl_dest'.
1636bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
1637  if (lit < 0) {
1638    return false;
1639  }
1640  if (lit == 0) {
1641    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1642    LoadConstant(rl_result.reg, 0);
1643    StoreValue(rl_dest, rl_result);
1644    return true;
1645  }
1646  if (lit == 1) {
1647    rl_src = LoadValue(rl_src, kCoreReg);
1648    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1649    OpRegCopy(rl_result.reg, rl_src.reg);
1650    StoreValue(rl_dest, rl_result);
1651    return true;
1652  }
1653  // There is RegRegRegShift on Arm, so check for more special cases.
1654  // TODO: disabled, need to handle case of "dest == src" properly.
1655  if (false && cu_->instruction_set == kThumb2) {
1656    return EasyMultiply(rl_src, rl_dest, lit);
1657  }
1658  // Can we simplify this multiplication?
1659  bool power_of_two = false;
1660  bool pop_count_le2 = false;
1661  bool power_of_two_minus_one = false;
1662  if (IsPowerOfTwo(lit)) {
1663    power_of_two = true;
1664  } else if (IsPopCountLE2(lit)) {
1665    pop_count_le2 = true;
1666  } else if (IsPowerOfTwo(lit + 1)) {
1667    power_of_two_minus_one = true;
1668  } else {
1669    return false;
1670  }
1671  rl_src = LoadValue(rl_src, kCoreReg);
1672  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1673  if (power_of_two) {
1674    // Shift.
1675    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
1676  } else if (pop_count_le2) {
1677    // Shift and add and shift.
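    // E.g. lit = 10 (0b1010): first_bit = 1, second_bit = 3, so the product is
    // conceptually (src << 1) + (src << 3); the exact instruction sequence is
    // chosen by the target-specific GenMultiplyByTwoBitMultiplier.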
1678    int first_bit = LowestSetBit(lit);
1679    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
1680    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
1681  } else {
1682    // Reverse subtract: (src << n) - src, where lit + 1 == (1 << n).
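    // E.g. lit = 7: lit + 1 == 8 == 1 << 3, so the result is (src << 3) - src.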
1683    DCHECK(power_of_two_minus_one);
1684    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
1685    RegStorage t_reg = AllocTemp();
1686    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
1687    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
1688  }
1689  StoreValue(rl_dest, rl_result);
1690  return true;
1691}
1692
1693void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
1694                               int lit) {
1695  RegLocation rl_result;
1696  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
1697  bool shift_op = false;
1698  bool is_div = false;
1699
1700  switch (opcode) {
1701    case Instruction::RSUB_INT_LIT8:
1702    case Instruction::RSUB_INT: {
1703      rl_src = LoadValue(rl_src, kCoreReg);
1704      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1705      if (cu_->instruction_set == kThumb2) {
1706        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
1707      } else {
1708        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
1709        OpRegImm(kOpAdd, rl_result.reg, lit);
1710      }
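      // Either way the result is lit - src (a reverse subtract).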
1711      StoreValue(rl_dest, rl_result);
1712      return;
1713    }
1714
1715    case Instruction::SUB_INT:
1716    case Instruction::SUB_INT_2ADDR:
1717      lit = -lit;
1718      // Intended fallthrough
1719    case Instruction::ADD_INT:
1720    case Instruction::ADD_INT_2ADDR:
1721    case Instruction::ADD_INT_LIT8:
1722    case Instruction::ADD_INT_LIT16:
1723      op = kOpAdd;
1724      break;
1725    case Instruction::MUL_INT:
1726    case Instruction::MUL_INT_2ADDR:
1727    case Instruction::MUL_INT_LIT8:
1728    case Instruction::MUL_INT_LIT16: {
1729      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
1730        return;
1731      }
1732      op = kOpMul;
1733      break;
1734    }
1735    case Instruction::AND_INT:
1736    case Instruction::AND_INT_2ADDR:
1737    case Instruction::AND_INT_LIT8:
1738    case Instruction::AND_INT_LIT16:
1739      op = kOpAnd;
1740      break;
1741    case Instruction::OR_INT:
1742    case Instruction::OR_INT_2ADDR:
1743    case Instruction::OR_INT_LIT8:
1744    case Instruction::OR_INT_LIT16:
1745      op = kOpOr;
1746      break;
1747    case Instruction::XOR_INT:
1748    case Instruction::XOR_INT_2ADDR:
1749    case Instruction::XOR_INT_LIT8:
1750    case Instruction::XOR_INT_LIT16:
1751      op = kOpXor;
1752      break;
1753    case Instruction::SHL_INT_LIT8:
1754    case Instruction::SHL_INT:
1755    case Instruction::SHL_INT_2ADDR:
1756      lit &= 31;
1757      shift_op = true;
1758      op = kOpLsl;
1759      break;
1760    case Instruction::SHR_INT_LIT8:
1761    case Instruction::SHR_INT:
1762    case Instruction::SHR_INT_2ADDR:
1763      lit &= 31;
1764      shift_op = true;
1765      op = kOpAsr;
1766      break;
1767    case Instruction::USHR_INT_LIT8:
1768    case Instruction::USHR_INT:
1769    case Instruction::USHR_INT_2ADDR:
1770      lit &= 31;
1771      shift_op = true;
1772      op = kOpLsr;
1773      break;
1774
1775    case Instruction::DIV_INT:
1776    case Instruction::DIV_INT_2ADDR:
1777    case Instruction::DIV_INT_LIT8:
1778    case Instruction::DIV_INT_LIT16:
1779    case Instruction::REM_INT:
1780    case Instruction::REM_INT_2ADDR:
1781    case Instruction::REM_INT_LIT8:
1782    case Instruction::REM_INT_LIT16: {
1783      if (lit == 0) {
1784        GenImmedCheck(kCondAl, RegStorage::InvalidReg(), 0, kThrowDivZero);
1785        return;
1786      }
1787      if ((opcode == Instruction::DIV_INT) ||
1788          (opcode == Instruction::DIV_INT_2ADDR) ||
1789          (opcode == Instruction::DIV_INT_LIT8) ||
1790          (opcode == Instruction::DIV_INT_LIT16)) {
1791        is_div = true;
1792      } else {
1793        is_div = false;
1794      }
1795      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
1796        return;
1797      }
1798
1799      bool done = false;
1800      if (cu_->instruction_set == kMips) {
1801        rl_src = LoadValue(rl_src, kCoreReg);
1802        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1803        done = true;
1804      } else if (cu_->instruction_set == kX86) {
1805        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
1806        done = true;
1807      } else if (cu_->instruction_set == kThumb2) {
1808        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
1809          // Use ARM SDIV instruction for division.  For remainder we also need to
1810          // calculate using a MUL and subtract.
1811          rl_src = LoadValue(rl_src, kCoreReg);
1812          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1813          done = true;
1814        }
1815      }
1816
1817      if (!done) {
1818        FlushAllRegs();   /* Everything to home location. */
1819        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
1820        Clobber(TargetReg(kArg0));
1821        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
1822        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
1823        if (is_div)
1824          rl_result = GetReturn(false);
1825        else
1826          rl_result = GetReturnAlt();
1827      }
1828      StoreValue(rl_dest, rl_result);
1829      return;
1830    }
1831    default:
1832      LOG(FATAL) << "Unexpected opcode " << opcode;
1833  }
1834  rl_src = LoadValue(rl_src, kCoreReg);
1835  rl_result = EvalLoc(rl_dest, kCoreReg, true);
1836  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
1837  if (shift_op && (lit == 0)) {
1838    OpRegCopy(rl_result.reg, rl_src.reg);
1839  } else {
1840    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
1841  }
1842  StoreValue(rl_dest, rl_result);
1843}
1844
1845void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
1846                             RegLocation rl_src1, RegLocation rl_src2) {
1847  RegLocation rl_result;
1848  OpKind first_op = kOpBkpt;
1849  OpKind second_op = kOpBkpt;
1850  bool call_out = false;
1851  bool check_zero = false;
1852  ThreadOffset<4> func_offset(-1);
1853  int ret_reg = TargetReg(kRet0).GetReg();
1854
1855  switch (opcode) {
1856    case Instruction::NOT_LONG:
1857      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1858      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1859      // Check for destructive overlap
1860      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
1861        RegStorage t_reg = AllocTemp();
1862        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
1863        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1864        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
1865        FreeTemp(t_reg);
1866      } else {
1867        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1868        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
1869      }
1870      StoreValueWide(rl_dest, rl_result);
1871      return;
1872    case Instruction::ADD_LONG:
1873    case Instruction::ADD_LONG_2ADDR:
1874      if (cu_->instruction_set != kThumb2) {
1875        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
1876        return;
1877      }
1878      first_op = kOpAdd;
1879      second_op = kOpAdc;
1880      break;
1881    case Instruction::SUB_LONG:
1882    case Instruction::SUB_LONG_2ADDR:
1883      if (cu_->instruction_set != kThumb2) {
1884        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
1885        return;
1886      }
1887      first_op = kOpSub;
1888      second_op = kOpSbc;
1889      break;
1890    case Instruction::MUL_LONG:
1891    case Instruction::MUL_LONG_2ADDR:
1892      if (cu_->instruction_set != kMips) {
1893        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
1894        return;
1895      } else {
1896        call_out = true;
1897        ret_reg = TargetReg(kRet0).GetReg();
1898        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
1899      }
1900      break;
1901    case Instruction::DIV_LONG:
1902    case Instruction::DIV_LONG_2ADDR:
1903      call_out = true;
1904      check_zero = true;
1905      ret_reg = TargetReg(kRet0).GetReg();
1906      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLdiv);
1907      break;
1908    case Instruction::REM_LONG:
1909    case Instruction::REM_LONG_2ADDR:
1910      call_out = true;
1911      check_zero = true;
1912      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmod);
1913      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
1914      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() : TargetReg(kRet0).GetReg();
1915      break;
1916    case Instruction::AND_LONG_2ADDR:
1917    case Instruction::AND_LONG:
1918      if (cu_->instruction_set == kX86) {
1919        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
1920      }
1921      first_op = kOpAnd;
1922      second_op = kOpAnd;
1923      break;
1924    case Instruction::OR_LONG:
1925    case Instruction::OR_LONG_2ADDR:
1926      if (cu_->instruction_set == kX86) {
1927        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
1928        return;
1929      }
1930      first_op = kOpOr;
1931      second_op = kOpOr;
1932      break;
1933    case Instruction::XOR_LONG:
1934    case Instruction::XOR_LONG_2ADDR:
1935      if (cu_->instruction_set == kX86) {
1936        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
1937        return;
1938      }
1939      first_op = kOpXor;
1940      second_op = kOpXor;
1941      break;
1942    case Instruction::NEG_LONG: {
1943      GenNegLong(rl_dest, rl_src2);
1944      return;
1945    }
1946    default:
1947      LOG(FATAL) << "Invalid long arith op: " << opcode;
1948  }
1949  if (!call_out) {
1950    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
1951  } else {
1952    FlushAllRegs();   /* Send everything to home location */
1953    if (check_zero) {
1954      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
1955      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
1956      LoadValueDirectWideFixed(rl_src2, r_tmp2);
1957      RegStorage r_tgt = CallHelperSetup(func_offset);
1958      GenDivZeroCheck(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
1959      LoadValueDirectWideFixed(rl_src1, r_tmp1);
1960      // NOTE: callout here is not a safepoint
1961      CallHelper(r_tgt, func_offset, false /* not safepoint */);
1962    } else {
1963      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
1964    }
1965    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
1966    if (ret_reg == TargetReg(kRet0).GetReg())
1967      rl_result = GetReturnWide(false);
1968    else
1969      rl_result = GetReturnWideAlt();
1970    StoreValueWide(rl_dest, rl_result);
1971  }
1972}
1973
1974void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
1975                                RegLocation rl_dest, RegLocation rl_src) {
1976  /*
1977   * Don't optimize the register usage since it calls out to support
1978   * functions
1979   */
1980  FlushAllRegs();   /* Send everything to home location */
1981  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
1982  if (rl_dest.wide) {
1983    RegLocation rl_result;
1984    rl_result = GetReturnWide(rl_dest.fp);
1985    StoreValueWide(rl_dest, rl_result);
1986  } else {
1987    RegLocation rl_result;
1988    rl_result = GetReturn(rl_dest.fp);
1989    StoreValue(rl_dest, rl_result);
1990  }
1991}
1992
1993/* Check whether we need to test for a pending suspend request. */
1994void Mir2Lir::GenSuspendTest(int opt_flags) {
1995  if (Runtime::Current()->ExplicitSuspendChecks()) {
1996    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
1997      return;
1998    }
1999    FlushAllRegs();
2000    LIR* branch = OpTestSuspend(NULL);
2001    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
2002    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
2003                         current_dalvik_offset_);
2004    branch->target = target;
2005    suspend_launchpads_.Insert(target);
2006  } else {
2007    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2008      return;
2009    }
2010    FlushAllRegs();     // TODO: needed?
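    // Implicit check: CheckSuspendUsingLoad emits a load that the runtime arranges
    // to fault when a suspend has been requested, which is why the instruction is
    // recorded as a safepoint below.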
2011    LIR* inst = CheckSuspendUsingLoad();
2012    MarkSafepointPC(inst);
2013  }
2014}
2015
2016/* Check whether we need to test for a pending suspend request, then branch to 'target'. */
2017void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
2018  if (Runtime::Current()->ExplicitSuspendChecks()) {
2019    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2020      OpUnconditionalBranch(target);
2021      return;
2022    }
2023    OpTestSuspend(target);
2024    LIR* launch_pad =
2025        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
2026               current_dalvik_offset_);
2027    FlushAllRegs();
2028    OpUnconditionalBranch(launch_pad);
2029    suspend_launchpads_.Insert(launch_pad);
2030  } else {
2031    // For the implicit suspend check, just perform the trigger
2032    // load and branch to the target.
2033    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2034      OpUnconditionalBranch(target);
2035      return;
2036    }
2037    FlushAllRegs();
2038    LIR* inst = CheckSuspendUsingLoad();
2039    MarkSafepointPC(inst);
2040    OpUnconditionalBranch(target);
2041  }
2042}
2043
2044/* Call out to helper assembly routine that will null check obj and then lock it. */
2045void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
2046  FlushAllRegs();
2047  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
2048}
2049
2050/* Call out to helper assembly routine that will null check obj and then unlock it. */
2051void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
2052  FlushAllRegs();
2053  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
2054}
2055
2056/* Generic code for generating a wide constant into a VR. */
2057void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
2058  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
2059  LoadConstantWide(rl_result.reg, value);
2060  StoreValueWide(rl_dest, rl_result);
2061}
2062
2063}  // namespace art
2064