gen_common.cc revision e2143c0a4af68c08e811885eb2f3ea5bfdb21ab6
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/compiler_internals.h"
19#include "dex/quick/arm/arm_lir.h"
20#include "dex/quick/mir_to_lir-inl.h"
21#include "entrypoints/quick/quick_entrypoints.h"
22#include "mirror/array.h"
23#include "mirror/object-inl.h"
24#include "verifier/method_verifier.h"
25#include <functional>
26
27namespace art {
28
29/*
30 * This source file contains "gen" codegen routines that should
31 * be applicable to most targets.  Only mid-level support utilities
32 * and "op" calls may be used here.
33 */
34
35/*
36 * Generate a kPseudoBarrier marker to indicate the boundary of special
37 * blocks.
38 */
39void Mir2Lir::GenBarrier() {
40  LIR* barrier = NewLIR0(kPseudoBarrier);
41  /* Mark all resources as being clobbered */
42  DCHECK(!barrier->flags.use_def_invalid);
43  barrier->u.m.def_mask = ENCODE_ALL;
44}
45
46// TODO: need to do some work to split out targets with
47// condition codes and those without
48LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
49  DCHECK_NE(cu_->instruction_set, kMips);
50  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
51  LIR* branch = OpCondBranch(c_code, tgt);
52  // Remember branch target - will process later
53  throw_launchpads_.Insert(tgt);
54  return branch;
55}
56
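/* Compare reg against imm_val and branch to a throw launchpad; kCondAl throws unconditionally. */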
57LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
58  LIR* tgt;
59  LIR* branch;
60  if (c_code == kCondAl) {
61    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
62                 imm_val);
63    branch = OpUnconditionalBranch(tgt);
64  } else {
65    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
66    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
67  }
68  // Remember branch target - will process later
69  throw_launchpads_.Insert(tgt);
70  return branch;
71}
72
73
74/* Perform null-check on a register.  */
75LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
76  if (Runtime::Current()->ExplicitNullChecks()) {
77    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
78      return NULL;
79    }
80    return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
81  }
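  // Implicit null checks: no compare is emitted; the faulting access itself raises the NPE.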
82  return nullptr;
83}
84
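// With implicit null checks, record a safepoint at the last emitted instruction so a fault there
// can be mapped back to this dex pc.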
85void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
86  if (!Runtime::Current()->ExplicitNullChecks()) {
87    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
88      return;
89    }
90    MarkSafepointPC(last_lir_insn_);
91  }
92}
93
94void Mir2Lir::MarkPossibleStackOverflowException() {
95  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
96    MarkSafepointPC(last_lir_insn_);
97  }
98}
99
100void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
101  if (!Runtime::Current()->ExplicitNullChecks()) {
102    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
103      return;
104    }
105    // Force an implicit null check by performing a memory operation (load) from the given
106    // register with offset 0.  This will cause a signal if the register contains 0 (null).
107    RegStorage tmp = AllocTemp();
108    // TODO: for Mips, would be best to use rZERO as the bogus register target.
109    LIR* load = LoadWordDisp(reg, 0, tmp);
110    FreeTemp(tmp);
111    MarkSafepointPC(load);
112  }
113}
114
115/* Perform check on two registers */
116LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
117                             ThrowKind kind) {
118  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
119                    reg2.GetReg());
120  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
121  // Remember branch target - will process later
122  throw_launchpads_.Insert(tgt);
123  return branch;
124}
125
126void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
127                                  RegLocation rl_src2, LIR* taken,
128                                  LIR* fall_through) {
129  ConditionCode cond;
130  switch (opcode) {
131    case Instruction::IF_EQ:
132      cond = kCondEq;
133      break;
134    case Instruction::IF_NE:
135      cond = kCondNe;
136      break;
137    case Instruction::IF_LT:
138      cond = kCondLt;
139      break;
140    case Instruction::IF_GE:
141      cond = kCondGe;
142      break;
143    case Instruction::IF_GT:
144      cond = kCondGt;
145      break;
146    case Instruction::IF_LE:
147      cond = kCondLe;
148      break;
149    default:
150      cond = static_cast<ConditionCode>(0);
151      LOG(FATAL) << "Unexpected opcode " << opcode;
152  }
153
154  // Normalize such that if either operand is constant, src2 will be constant
155  if (rl_src1.is_const) {
156    RegLocation rl_temp = rl_src1;
157    rl_src1 = rl_src2;
158    rl_src2 = rl_temp;
159    cond = FlipComparisonOrder(cond);
160  }
161
162  rl_src1 = LoadValue(rl_src1, kCoreReg);
163  // Is this really an immediate comparison?
164  if (rl_src2.is_const) {
165    // If it's already live in a register or not easily materialized, just keep going
166    RegLocation rl_temp = UpdateLoc(rl_src2);
167    if ((rl_temp.location == kLocDalvikFrame) &&
168        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
169      // OK - convert this to a compare immediate and branch
170      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
171      return;
172    }
173  }
174  rl_src2 = LoadValue(rl_src2, kCoreReg);
175  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
176}
177
178void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
179                                      LIR* fall_through) {
180  ConditionCode cond;
181  rl_src = LoadValue(rl_src, kCoreReg);
182  switch (opcode) {
183    case Instruction::IF_EQZ:
184      cond = kCondEq;
185      break;
186    case Instruction::IF_NEZ:
187      cond = kCondNe;
188      break;
189    case Instruction::IF_LTZ:
190      cond = kCondLt;
191      break;
192    case Instruction::IF_GEZ:
193      cond = kCondGe;
194      break;
195    case Instruction::IF_GTZ:
196      cond = kCondGt;
197      break;
198    case Instruction::IF_LEZ:
199      cond = kCondLe;
200      break;
201    default:
202      cond = static_cast<ConditionCode>(0);
203      LOG(FATAL) << "Unexpected opcode " << opcode;
204  }
205  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
206}
207
208void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
209  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
210  if (rl_src.location == kLocPhysReg) {
211    OpRegCopy(rl_result.reg, rl_src.reg);
212  } else {
213    LoadValueDirect(rl_src, rl_result.reg.GetLow());
214  }
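  // Sign-extend into the high word: arithmetic shift right of the low word by 31.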
215  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
216  StoreValueWide(rl_dest, rl_result);
217}
218
219void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
220                              RegLocation rl_src) {
221  rl_src = LoadValue(rl_src, kCoreReg);
222  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
223  OpKind op = kOpInvalid;
224  switch (opcode) {
225    case Instruction::INT_TO_BYTE:
226      op = kOp2Byte;
227      break;
228    case Instruction::INT_TO_SHORT:
229      op = kOp2Short;
230      break;
231    case Instruction::INT_TO_CHAR:
232      op = kOp2Char;
233      break;
234    default:
235      LOG(ERROR) << "Bad int conversion type";
236  }
237  OpRegReg(op, rl_result.reg, rl_src.reg);
238  StoreValue(rl_dest, rl_result);
239}
240
241/*
242 * Let helper function take care of everything.  Will call
243 * Array::AllocFromCode(type_idx, method, count);
244 * Note: AllocFromCode will handle checks for errNegativeArraySize.
245 */
246void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
247                          RegLocation rl_src) {
248  FlushAllRegs();  /* Everything to home location */
249  ThreadOffset func_offset(-1);
250  const DexFile* dex_file = cu_->dex_file;
251  CompilerDriver* driver = cu_->compiler_driver;
252  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
253                                                       type_idx)) {
254    bool is_type_initialized;  // Ignored as an array does not have an initializer.
255    bool use_direct_type_ptr;
256    uintptr_t direct_type_ptr;
257    if (kEmbedClassInCode &&
258        driver->CanEmbedTypeInCode(*dex_file, type_idx,
259                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
260      // The fast path.
261      if (!use_direct_type_ptr) {
262        LoadClassType(type_idx, kArg0);
263        func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved);
264        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
265      } else {
266        // Use the direct pointer.
267        func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved);
268        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
269      }
270    } else {
271      // The slow path.
272      DCHECK_EQ(func_offset.Int32Value(), -1);
273      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
274      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
275    }
276    DCHECK_NE(func_offset.Int32Value(), -1);
277  } else {
278    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
279    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
280  }
281  RegLocation rl_result = GetReturn(false);
282  StoreValue(rl_dest, rl_result);
283}
284
285/*
286 * Similar to GenNewArray, but with post-allocation initialization.
287 * Verifier guarantees we're dealing with an array class.  Current
288 * code throws a runtime exception "bad Filled array req" for 'D' and 'J'.
289 * Current code also throws an internal unimplemented error if not 'L', '[' or 'I'.
290 */
291void Mir2Lir::GenFilledNewArray(CallInfo* info) {
292  int elems = info->num_arg_words;
293  int type_idx = info->index;
294  FlushAllRegs();  /* Everything to home location */
295  ThreadOffset func_offset(-1);
296  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
297                                                       type_idx)) {
298    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
299  } else {
300    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
301  }
302  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
303  FreeTemp(TargetReg(kArg2));
304  FreeTemp(TargetReg(kArg1));
305  /*
306   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
307   * return region.  Because AllocFromCode placed the new array
308   * in kRet0, we'll just lock it into place.  When debugger support is
309   * added, it may be necessary to additionally copy all return
310   * values to a home location in thread-local storage
311   */
312  LockTemp(TargetReg(kRet0));
313
314  // TODO: use the correct component size, currently all supported types
315  // share array alignment with ints (see comment at head of function)
316  size_t component_size = sizeof(int32_t);
317
318  // Having a range of 0 is legal
319  if (info->is_range && (elems > 0)) {
320    /*
321     * Bit of ugliness here.  We're going to generate a mem copy loop
322     * on the register range, but it is possible that some regs
323     * in the range have been promoted.  This is unlikely, but
324     * before generating the copy, we'll just force a flush
325     * of any regs in the source range that have been promoted to
326     * home location.
327     */
328    for (int i = 0; i < elems; i++) {
329      RegLocation loc = UpdateLoc(info->args[i]);
330      if (loc.location == kLocPhysReg) {
331        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
332      }
333    }
334    /*
335     * TUNING note: generated code here could be much improved, but
336     * this is an uncommon operation and isn't especially performance
337     * critical.
338     */
339    RegStorage r_src = AllocTemp();
340    RegStorage r_dst = AllocTemp();
341    RegStorage r_idx = AllocTemp();
342    RegStorage r_val;
343    switch (cu_->instruction_set) {
344      case kThumb2:
345        r_val = TargetReg(kLr);
346        break;
347      case kX86:
348        FreeTemp(TargetReg(kRet0));
349        r_val = AllocTemp();
350        break;
351      case kMips:
352        r_val = AllocTemp();
353        break;
354      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
355    }
356    // Set up source pointer
357    RegLocation rl_first = info->args[0];
358    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
359    // Set up the target pointer
360    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
361                mirror::Array::DataOffset(component_size).Int32Value());
362    // Set up the loop counter (known to be > 0)
363    LoadConstant(r_idx, elems - 1);
364    // Generate the copy loop.  Going backwards for convenience
365    LIR* target = NewLIR0(kPseudoTargetLabel);
366    // Copy next element
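    // The index is scaled by 4 (shift of 2) since all supported component types are 32 bits here.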
367    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
368    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
369    FreeTemp(r_val);
370    OpDecAndBranch(kCondGe, r_idx, target);
371    if (cu_->instruction_set == kX86) {
372      // Restore the target pointer
373      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
374                  -mirror::Array::DataOffset(component_size).Int32Value());
375    }
376  } else if (!info->is_range) {
377    // TUNING: interleave
378    for (int i = 0; i < elems; i++) {
379      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
380      StoreBaseDisp(TargetReg(kRet0),
381                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
382                    rl_arg.reg, kWord);
383      // If the LoadValue caused a temp to be allocated, free it
384      if (IsTemp(rl_arg.reg)) {
385        FreeTemp(rl_arg.reg);
386      }
387    }
388  }
389  if (info->result.location != kLocInvalid) {
390    StoreValue(info->result, GetReturn(false /* not fp */));
391  }
392}
393
394//
395// Slow path to ensure a class is initialized for sget/sput.
396//
397class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
398 public:
399  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
400                      RegStorage r_base) :
401    LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
402               storage_index_(storage_index), r_base_(r_base) {
403  }
404
405  void Compile() {
406    LIR* unresolved_target = GenerateTargetLabel();
407    uninit_->target = unresolved_target;
408    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage),
409                               storage_index_, true);
410    // Copy helper's result into r_base, a no-op on all but MIPS.
411    m2l_->OpRegCopy(r_base_,  m2l_->TargetReg(kRet0));
412
413    m2l_->OpUnconditionalBranch(cont_);
414  }
415
416 private:
417  LIR* const uninit_;
418  const int storage_index_;
419  const RegStorage r_base_;
420};
421
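// Generate code for a static field put (sput).  The fast path resolves the storage base from the
// declaring class or the dex cache; otherwise a runtime setter entrypoint is called.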
422void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
423                      bool is_object) {
424  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
425  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
426  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
427    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
428    RegStorage r_base;
429    if (field_info.IsReferrersClass()) {
430      // Fast path, static storage base is this method's class
431      RegLocation rl_method  = LoadCurrMethod();
432      r_base = AllocTemp();
433      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
434      if (IsTemp(rl_method.reg)) {
435        FreeTemp(rl_method.reg);
436      }
437    } else {
438      // Medium path, static storage base in a different class which requires checks that the other
439      // class is initialized.
440      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
441      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
442      // May do runtime call so everything to home locations.
443      FlushAllRegs();
444      // Using fixed register to sync with possible call to runtime support.
445      RegStorage r_method = TargetReg(kArg1);
446      LockTemp(r_method);
447      LoadCurrMethodDirect(r_method);
448      r_base = TargetReg(kArg0);
449      LockTemp(r_base);
450      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
451      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
452                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
453      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
454      if (!field_info.IsInitialized() &&
455          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
456        // Check if r_base is NULL or a not yet initialized class.
457
458        // The slow path is invoked if the r_base is NULL or the class pointed
459        // to by it is not initialized.
460        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
461        RegStorage r_tmp = TargetReg(kArg2);
462        LockTemp(r_tmp);
463        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
464                                          mirror::Class::StatusOffset().Int32Value(),
465                                          mirror::Class::kStatusInitialized, NULL);
466        LIR* cont = NewLIR0(kPseudoTargetLabel);
467
468        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
469                                                     field_info.StorageIndex(), r_base));
470
471        FreeTemp(r_tmp);
472      }
473      FreeTemp(r_method);
474    }
475    // r_base now holds static storage base
476    if (is_long_or_double) {
477      rl_src = LoadValueWide(rl_src, kAnyReg);
478    } else {
479      rl_src = LoadValue(rl_src, kAnyReg);
480    }
481    if (field_info.IsVolatile()) {
482      // There might have been a store before this volatile one so insert StoreStore barrier.
483      GenMemBarrier(kStoreStore);
484    }
485    if (is_long_or_double) {
486      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
487    } else {
488      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
489    }
490    if (field_info.IsVolatile()) {
491      // A load might follow the volatile store so insert a StoreLoad barrier.
492      GenMemBarrier(kStoreLoad);
493    }
494    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
495      MarkGCCard(rl_src.reg, r_base);
496    }
497    FreeTemp(r_base);
498  } else {
499    FlushAllRegs();  // Everything to home locations
500    ThreadOffset setter_offset =
501        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
502                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
503                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
504    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
505  }
506}
507
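// Generate code for a static field get (sget); mirrors the fast/slow path structure of GenSput.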
508void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
509                      bool is_long_or_double, bool is_object) {
510  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
511  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
512  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
513    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
514    RegStorage r_base;
515    if (field_info.IsReferrersClass()) {
516      // Fast path, static storage base is this method's class
517      RegLocation rl_method  = LoadCurrMethod();
518      r_base = AllocTemp();
519      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
520    } else {
521      // Medium path, static storage base in a different class which requires checks that the other
522      // class is initialized
523      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
524      // May do runtime call so everything to home locations.
525      FlushAllRegs();
526      // Using fixed register to sync with possible call to runtime support.
527      RegStorage r_method = TargetReg(kArg1);
528      LockTemp(r_method);
529      LoadCurrMethodDirect(r_method);
530      r_base = TargetReg(kArg0);
531      LockTemp(r_base);
532      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
533      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
534                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
535      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
536      if (!field_info.IsInitialized() &&
537          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
538        // Check if r_base is NULL or a not yet initialized class.
539
540        // The slow path is invoked if the r_base is NULL or the class pointed
541        // to by it is not initialized.
542        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
543        RegStorage r_tmp = TargetReg(kArg2);
544        LockTemp(r_tmp);
545        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
546                                          mirror::Class::StatusOffset().Int32Value(),
547                                          mirror::Class::kStatusInitialized, NULL);
548        LIR* cont = NewLIR0(kPseudoTargetLabel);
549
550        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
551                                                     field_info.StorageIndex(), r_base));
552
553        FreeTemp(r_tmp);
554      }
555      FreeTemp(r_method);
556    }
557    // r_base now holds static storage base
558    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
559
560    if (is_long_or_double) {
561      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
562    } else {
563      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
564    }
565    FreeTemp(r_base);
566
567    if (field_info.IsVolatile()) {
568      // Without context sensitive analysis, we must issue the most conservative barriers.
569      // In this case, either a load or store may follow so we issue both barriers.
570      GenMemBarrier(kLoadLoad);
571      GenMemBarrier(kLoadStore);
572    }
573
574    if (is_long_or_double) {
575      StoreValueWide(rl_dest, rl_result);
576    } else {
577      StoreValue(rl_dest, rl_result);
578    }
579  } else {
580    FlushAllRegs();  // Everything to home locations
581    ThreadOffset getterOffset =
582        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
583                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
584                                      : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
585    CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
586    if (is_long_or_double) {
587      RegLocation rl_result = GetReturnWide(rl_dest.fp);
588      StoreValueWide(rl_dest, rl_result);
589    } else {
590      RegLocation rl_result = GetReturn(rl_dest.fp);
591      StoreValue(rl_dest, rl_result);
592    }
593  }
594}
595
596// Generate code for all slow paths.
597void Mir2Lir::HandleSlowPaths() {
598  int n = slow_paths_.Size();
599  for (int i = 0; i < n; ++i) {
600    LIRSlowPath* slowpath = slow_paths_.Get(i);
601    slowpath->Compile();
602  }
603  slow_paths_.Reset();
604}
605
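// Materialize the deferred suspend-check launchpads: each calls pTestSuspend and then branches
// back to its resume label.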
606void Mir2Lir::HandleSuspendLaunchPads() {
607  int num_elems = suspend_launchpads_.Size();
608  ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
609  for (int i = 0; i < num_elems; i++) {
610    ResetRegPool();
611    ResetDefTracking();
612    LIR* lab = suspend_launchpads_.Get(i);
613    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
614    current_dalvik_offset_ = lab->operands[1];
615    AppendLIR(lab);
616    RegStorage r_tgt = CallHelperSetup(helper_offset);
617    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
618    OpUnconditionalBranch(resume_lab);
619  }
620}
621
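// Materialize the deferred throw launchpads: set up any needed arguments in kArg0/kArg1 and call
// the matching Quick throw entrypoint.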
622void Mir2Lir::HandleThrowLaunchPads() {
623  int num_elems = throw_launchpads_.Size();
624  for (int i = 0; i < num_elems; i++) {
625    ResetRegPool();
626    ResetDefTracking();
627    LIR* lab = throw_launchpads_.Get(i);
628    current_dalvik_offset_ = lab->operands[1];
629    AppendLIR(lab);
630    ThreadOffset func_offset(-1);
631    int v1 = lab->operands[2];
632    int v2 = lab->operands[3];
633    const bool target_x86 = cu_->instruction_set == kX86;
634    switch (lab->operands[0]) {
635      case kThrowNullPointer:
636        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
637        break;
638      case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
639        // v2 is the constant index; v1 is the length (Arm/Mips) or the array pointer (x86).
640        if (target_x86) {
641          OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
642                   mirror::Array::LengthOffset().Int32Value());
643        } else {
644          OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
645        }
646        // Make sure the following LoadConstant doesn't mess with kArg1.
647        LockTemp(TargetReg(kArg1));
648        LoadConstant(TargetReg(kArg0), v2);
649        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
650        break;
651      case kThrowArrayBounds:
652        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
653        if (v2 != TargetReg(kArg0).GetReg()) {
654          OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
655          if (target_x86) {
656            // x86 leaves the array pointer in v2, so load the array length that the handler expects
657            OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
658                     mirror::Array::LengthOffset().Int32Value());
659          } else {
660            OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
661          }
662        } else {
663          if (v1 == TargetReg(kArg1).GetReg()) {
664            // Swap v1 and v2, using kArg2 as a temp
665            OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
666            if (target_x86) {
667              // x86 leaves the array pointer in v2; load the array length that the handler expects
668              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
669                       mirror::Array::LengthOffset().Int32Value());
670            } else {
671              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
672            }
673            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
674          } else {
675            if (target_x86) {
676              // x86 leaves the array pointer in v2; load the array length that the handler expects
677              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
678                       mirror::Array::LengthOffset().Int32Value());
679            } else {
680              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
681            }
682            OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
683          }
684        }
685        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
686        break;
687      case kThrowDivZero:
688        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
689        break;
690      case kThrowNoSuchMethod:
691        OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
692        func_offset =
693          QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
694        break;
695      default:
696        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
697    }
698    ClobberCallerSave();
699    RegStorage r_tgt = CallHelperSetup(func_offset);
700    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
701  }
702}
703
704void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
705                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
706                      bool is_object) {
707  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
708  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
709  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
710    RegLocation rl_result;
711    RegisterClass reg_class = oat_reg_class_by_size(size);
712    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
713    rl_obj = LoadValue(rl_obj, kCoreReg);
714    if (is_long_or_double) {
715      DCHECK(rl_dest.wide);
716      GenNullCheck(rl_obj.reg, opt_flags);
717      if (cu_->instruction_set == kX86) {
718        rl_result = EvalLoc(rl_dest, reg_class, true);
719        // FIXME?  duplicate null check?
720        GenNullCheck(rl_obj.reg, opt_flags);
721        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
722                         rl_obj.s_reg_low);
723        MarkPossibleNullPointerException(opt_flags);
724        if (field_info.IsVolatile()) {
725          // Without context sensitive analysis, we must issue the most conservative barriers.
726          // In this case, either a load or store may follow so we issue both barriers.
727          GenMemBarrier(kLoadLoad);
728          GenMemBarrier(kLoadStore);
729        }
730      } else {
731        RegStorage reg_ptr = AllocTemp();
732        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
733        rl_result = EvalLoc(rl_dest, reg_class, true);
734        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
735        if (field_info.IsVolatile()) {
736          // Without context sensitive analysis, we must issue the most conservative barriers.
737          // In this case, either a load or store may follow so we issue both barriers.
738          GenMemBarrier(kLoadLoad);
739          GenMemBarrier(kLoadStore);
740        }
741        FreeTemp(reg_ptr);
742      }
743      StoreValueWide(rl_dest, rl_result);
744    } else {
745      rl_result = EvalLoc(rl_dest, reg_class, true);
746      GenNullCheck(rl_obj.reg, opt_flags);
747      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
748                   rl_obj.s_reg_low);
749      MarkPossibleNullPointerException(opt_flags);
750      if (field_info.IsVolatile()) {
751        // Without context sensitive analysis, we must issue the most conservative barriers.
752        // In this case, either a load or store may follow so we issue both barriers.
753        GenMemBarrier(kLoadLoad);
754        GenMemBarrier(kLoadStore);
755      }
756      StoreValue(rl_dest, rl_result);
757    }
758  } else {
759    ThreadOffset getterOffset =
760        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
761                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
762                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
763    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
764    if (is_long_or_double) {
765      RegLocation rl_result = GetReturnWide(rl_dest.fp);
766      StoreValueWide(rl_dest, rl_result);
767    } else {
768      RegLocation rl_result = GetReturn(rl_dest.fp);
769      StoreValue(rl_dest, rl_result);
770    }
771  }
772}
773
774void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
775                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
776                      bool is_object) {
777  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
778  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
779  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
780    RegisterClass reg_class = oat_reg_class_by_size(size);
781    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
782    rl_obj = LoadValue(rl_obj, kCoreReg);
783    if (is_long_or_double) {
784      rl_src = LoadValueWide(rl_src, kAnyReg);
785      GenNullCheck(rl_obj.reg, opt_flags);
786      RegStorage reg_ptr = AllocTemp();
787      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
788      if (field_info.IsVolatile()) {
789        // There might have been a store before this volatile one so insert StoreStore barrier.
790        GenMemBarrier(kStoreStore);
791      }
792      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
793      MarkPossibleNullPointerException(opt_flags);
794      if (field_info.IsVolatile()) {
795        // A load might follow the volatile store so insert a StoreLoad barrier.
796        GenMemBarrier(kStoreLoad);
797      }
798      FreeTemp(reg_ptr);
799    } else {
800      rl_src = LoadValue(rl_src, reg_class);
801      GenNullCheck(rl_obj.reg, opt_flags);
802      if (field_info.IsVolatile()) {
803        // There might have been a store before this volatile one so insert StoreStore barrier.
804        GenMemBarrier(kStoreStore);
805      }
806      StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
807      MarkPossibleNullPointerException(opt_flags);
808      if (field_info.IsVolatile()) {
809        // A load might follow the volatile store so insert a StoreLoad barrier.
810        GenMemBarrier(kStoreLoad);
811      }
812      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
813        MarkGCCard(rl_src.reg, rl_obj.reg);
814      }
815    }
816  } else {
817    ThreadOffset setter_offset =
818        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
819                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
820                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
821    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
822                                               rl_obj, rl_src, true);
823  }
824}
825
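// aput-object is handled entirely by a runtime helper; pick the entrypoint variant based on which
// null/bounds checks can be skipped.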
826void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
827                             RegLocation rl_src) {
828  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
829  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
830      (opt_flags & MIR_IGNORE_NULL_CHECK));
831  ThreadOffset helper = needs_range_check
832      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pAputObjectWithNullAndBoundCheck)
833                          : QUICK_ENTRYPOINT_OFFSET(pAputObjectWithBoundCheck))
834      : QUICK_ENTRYPOINT_OFFSET(pAputObject);
835  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
836}
837
838void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
839  RegLocation rl_method = LoadCurrMethod();
840  RegStorage res_reg = AllocTemp();
841  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
842  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
843                                                   *cu_->dex_file,
844                                                   type_idx)) {
845    // Call out to helper which resolves type and verifies access.
846    // Resolved type returned in kRet0.
847    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
848                            type_idx, rl_method.reg, true);
849    RegLocation rl_result = GetReturn(false);
850    StoreValue(rl_dest, rl_result);
851  } else {
852    // We don't need access checks, load type from dex cache
853    int32_t dex_cache_offset =
854        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
855    LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
856    int32_t offset_of_type =
857        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
858                          * type_idx);
859    LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
860    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
861        type_idx) || SLOW_TYPE_PATH) {
862      // Slow path, at runtime test if type is null and if so initialize
863      FlushAllRegs();
864      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
865      LIR* cont = NewLIR0(kPseudoTargetLabel);
866
867      // Object to generate the slow path for class resolution.
868      class SlowPath : public LIRSlowPath {
869       public:
870        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
871                 const RegLocation& rl_method, const RegLocation& rl_result) :
872                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
873                   rl_method_(rl_method), rl_result_(rl_result) {
874        }
875
876        void Compile() {
877          GenerateTargetLabel();
878
879          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
880                                        rl_method_.reg, true);
881          m2l_->OpRegCopy(rl_result_.reg,  m2l_->TargetReg(kRet0));
882
883          m2l_->OpUnconditionalBranch(cont_);
884        }
885
886       private:
887        const int type_idx_;
888        const RegLocation rl_method_;
889        const RegLocation rl_result_;
890      };
891
892      // Add to list for future.
893      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));
894
895      StoreValue(rl_dest, rl_result);
896    } else {
897      // Fast path, we're done - just store result
898      StoreValue(rl_dest, rl_result);
899    }
900  }
901}
902
903void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
904  /* NOTE: Most strings should be available at compile time */
905  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
906                 (sizeof(mirror::String*) * string_idx);
907  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
908      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
909    // slow path, resolve string if not in dex cache
910    FlushAllRegs();
911    LockCallTemps();  // Using explicit registers
912
913    // If the Method* is already in a register, we can save a copy.
914    RegLocation rl_method = mir_graph_->GetMethodLoc();
915    RegStorage r_method;
916    if (rl_method.location == kLocPhysReg) {
917      // A temp would conflict with register use below.
918      DCHECK(!IsTemp(rl_method.reg));
919      r_method = rl_method.reg;
920    } else {
921      r_method = TargetReg(kArg2);
922      LoadCurrMethodDirect(r_method);
923    }
924    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
925                 TargetReg(kArg0));
926
927    // Might call out to helper, which will return resolved string in kRet0
928    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
929    if (cu_->instruction_set == kThumb2 ||
930        cu_->instruction_set == kMips) {
931      //  OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
932      LoadConstant(TargetReg(kArg1), string_idx);
933      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
934      LIR* cont = NewLIR0(kPseudoTargetLabel);
935      GenBarrier();
936
937      // Object to generate the slow path for string resolution.
938      class SlowPath : public LIRSlowPath {
939       public:
940        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
941          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
942        }
943
944        void Compile() {
945          GenerateTargetLabel();
946
947          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
948
949          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);   // .eq
950          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
951          m2l_->MarkSafepointPC(call_inst);
952          m2l_->FreeTemp(r_tgt);
953
954          m2l_->OpUnconditionalBranch(cont_);
955        }
956
957       private:
958         RegStorage r_method_;
959      };
960
961      // Add to list for future.
962      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
963    } else {
964      DCHECK_EQ(cu_->instruction_set, kX86);
965      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
966      LoadConstant(TargetReg(kArg1), string_idx);
967      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), r_method, TargetReg(kArg1),
968                              true);
969      LIR* target = NewLIR0(kPseudoTargetLabel);
970      branch->target = target;
971    }
972    GenBarrier();
973    StoreValue(rl_dest, GetReturn(false));
974  } else {
975    RegLocation rl_method = LoadCurrMethod();
976    RegStorage res_reg = AllocTemp();
977    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
978    LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
979    LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
980    StoreValue(rl_dest, rl_result);
981  }
982}
983
984/*
985 * Let helper function take care of everything.  Will
986 * call Class::NewInstanceFromCode(type_idx, method);
987 */
988void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
989  FlushAllRegs();  /* Everything to home location */
990  // alloc will always check for resolution; do we also need to verify
991  // access because the verifier was unable to?
992  ThreadOffset func_offset(-1);
993  const DexFile* dex_file = cu_->dex_file;
994  CompilerDriver* driver = cu_->compiler_driver;
995  if (driver->CanAccessInstantiableTypeWithoutChecks(
996      cu_->method_idx, *dex_file, type_idx)) {
997    bool is_type_initialized;
998    bool use_direct_type_ptr;
999    uintptr_t direct_type_ptr;
1000    if (kEmbedClassInCode &&
1001        driver->CanEmbedTypeInCode(*dex_file, type_idx,
1002                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
1003      // The fast path.
1004      if (!use_direct_type_ptr) {
1005        LoadClassType(type_idx, kArg0);
1006        if (!is_type_initialized) {
1007          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
1008          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
1009        } else {
1010          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
1011          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
1012        }
1013      } else {
1014        // Use the direct pointer.
1015        if (!is_type_initialized) {
1016          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
1017          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
1018        } else {
1019          func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
1020          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
1021        }
1022      }
1023    } else {
1024      // The slow path.
1025      DCHECK_EQ(func_offset.Int32Value(), -1);
1026      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
1027      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
1028    }
1029    DCHECK_NE(func_offset.Int32Value(), -1);
1030  } else {
1031    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
1032    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
1033  }
1034  RegLocation rl_result = GetReturn(false);
1035  StoreValue(rl_dest, rl_result);
1036}
1037
1038void Mir2Lir::GenThrow(RegLocation rl_src) {
1039  FlushAllRegs();
1040  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
1041}
1042
1043// For final classes there are no sub-classes to check and so we can answer the instance-of
1044// question with simple comparisons.
1045void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
1046                                 RegLocation rl_src) {
1047  // X86 has its own implementation.
1048  DCHECK_NE(cu_->instruction_set, kX86);
1049
1050  RegLocation object = LoadValue(rl_src, kCoreReg);
1051  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1052  RegStorage result_reg = rl_result.reg;
1053  if (result_reg == object.reg) {
1054    result_reg = AllocTypedTemp(false, kCoreReg);
1055  }
1056  LoadConstant(result_reg, 0);     // assume false
1057  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
1058
1059  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
1060  RegStorage object_class = AllocTypedTemp(false, kCoreReg);
1061
1062  LoadCurrMethodDirect(check_class);
1063  if (use_declaring_class) {
1064    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
1065    LoadWordDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
1066  } else {
1067    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1068                 check_class);
1069    LoadWordDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
1070    int32_t offset_of_type =
1071      mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
1072      (sizeof(mirror::Class*) * type_idx);
1073    LoadWordDisp(check_class, offset_of_type, check_class);
1074  }
1075
1076  LIR* ne_branchover = NULL;
1077  if (cu_->instruction_set == kThumb2) {
1078    OpRegReg(kOpCmp, check_class, object_class);  // Same?
1079    OpIT(kCondEq, "");   // if-convert the test
1080    LoadConstant(result_reg, 1);     // .eq case - load true
1081  } else {
1082    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
1083    LoadConstant(result_reg, 1);     // eq case - load true
1084  }
1085  LIR* target = NewLIR0(kPseudoTargetLabel);
1086  null_branchover->target = target;
1087  if (ne_branchover != NULL) {
1088    ne_branchover->target = target;
1089  }
1090  FreeTemp(object_class);
1091  FreeTemp(check_class);
1092  if (IsTemp(result_reg)) {
1093    OpRegCopy(rl_result.reg, result_reg);
1094    FreeTemp(result_reg);
1095  }
1096  StoreValue(rl_dest, rl_result);
1097}
1098
1099void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
1100                                         bool type_known_abstract, bool use_declaring_class,
1101                                         bool can_assume_type_is_in_dex_cache,
1102                                         uint32_t type_idx, RegLocation rl_dest,
1103                                         RegLocation rl_src) {
1104  // X86 has its own implementation.
1105  DCHECK_NE(cu_->instruction_set, kX86);
1106
1107  FlushAllRegs();
1108  // May generate a call - use explicit registers
1109  LockCallTemps();
1110  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
1111  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
1112  if (needs_access_check) {
1113    // Check we have access to type_idx and if not throw IllegalAccessError,
1114    // returns Class* in kArg0
1115    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
1116                         type_idx, true);
1117    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
1118    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1119  } else if (use_declaring_class) {
1120    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1121    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
1122                 class_reg);
1123  } else {
1124    // Load dex cache entry into class_reg (kArg2)
1125    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1126    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1127                 class_reg);
1128    int32_t offset_of_type =
1129        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
1130        * type_idx);
1131    LoadWordDisp(class_reg, offset_of_type, class_reg);
1132    if (!can_assume_type_is_in_dex_cache) {
1133      // Need to test presence of type in dex cache at runtime
1134      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
1135      // Not resolved
1136      // Call out to helper, which will return resolved type in kRet0
1137      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
1138      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
1139      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
1140      // Rejoin code paths
1141      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
1142      hop_branch->target = hop_target;
1143    }
1144  }
1145  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
1146  RegLocation rl_result = GetReturn(false);
1147  if (cu_->instruction_set == kMips) {
1148    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
1149    LoadConstant(rl_result.reg, 0);
1150  }
1151  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
1152
1153  /* load object->klass_ */
1154  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1155  LoadWordDisp(TargetReg(kArg0),  mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
1156  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
1157  LIR* branchover = NULL;
1158  if (type_known_final) {
1159    // rl_result == ref == null == 0.
1160    if (cu_->instruction_set == kThumb2) {
1161      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
1162      OpIT(kCondEq, "E");   // if-convert the test
1163      LoadConstant(rl_result.reg, 1);     // .eq case - load true
1164      LoadConstant(rl_result.reg, 0);     // .ne case - load false
1165    } else {
1166      LoadConstant(rl_result.reg, 0);     // ne case - load false
1167      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
1168      LoadConstant(rl_result.reg, 1);     // eq case - load true
1169    }
1170  } else {
1171    if (cu_->instruction_set == kThumb2) {
1172      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
1173      if (!type_known_abstract) {
1174        /* Uses conditional nullification */
1175        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
1176        OpIT(kCondEq, "EE");   // if-convert the test
1177        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
1178      }
1179      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
1180      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1181      FreeTemp(r_tgt);
1182    } else {
1183      if (!type_known_abstract) {
1184        /* Uses branchovers */
1185        LoadConstant(rl_result.reg, 1);     // assume true
1186        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
1187      }
1188      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
1189      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
1190      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1191      FreeTemp(r_tgt);
1192    }
1193  }
1194  // TODO: only clobber when type isn't final?
1195  ClobberCallerSave();
1196  /* branch targets here */
1197  LIR* target = NewLIR0(kPseudoTargetLabel);
1198  StoreValue(rl_dest, rl_result);
1199  branch1->target = target;
1200  if (branchover != NULL) {
1201    branchover->target = target;
1202  }
1203}
1204
1205void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
1206  bool type_known_final, type_known_abstract, use_declaring_class;
1207  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1208                                                                              *cu_->dex_file,
1209                                                                              type_idx,
1210                                                                              &type_known_final,
1211                                                                              &type_known_abstract,
1212                                                                              &use_declaring_class);
1213  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
1214      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
1215
1216  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
1217    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
1218  } else {
1219    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
1220                               use_declaring_class, can_assume_type_is_in_dex_cache,
1221                               type_idx, rl_dest, rl_src);
1222  }
1223}
1224
1225void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
1226  bool type_known_final, type_known_abstract, use_declaring_class;
1227  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1228                                                                              *cu_->dex_file,
1229                                                                              type_idx,
1230                                                                              &type_known_final,
1231                                                                              &type_known_abstract,
1232                                                                              &use_declaring_class);
1233  // Note: currently type_known_final is unused, as optimizing will only improve the performance
1234  // of the exception throw path.
1235  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
1236  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
1237    // Verifier type analysis proved this check cast would never cause an exception.
1238    return;
1239  }
1240  FlushAllRegs();
1241  // May generate a call - use explicit registers
1242  LockCallTemps();
1243  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
1244  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
1245  if (needs_access_check) {
1246    // Check we have access to type_idx and if not throw IllegalAccessError,
1247    // returns Class* in kRet0
1248    // InitializeTypeAndVerifyAccess(idx, method)
1249    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
1250                            type_idx, TargetReg(kArg1), true);
1251    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
1252  } else if (use_declaring_class) {
1253    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
1254                 class_reg);
1255  } else {
1256    // Load dex cache entry into class_reg (kArg2)
1257    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1258                 class_reg);
1259    int32_t offset_of_type =
1260        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
1261        (sizeof(mirror::Class*) * type_idx);
1262    LoadWordDisp(class_reg, offset_of_type, class_reg);
1263    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
1264      // Need to test presence of type in dex cache at runtime
1265      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
1266      LIR* cont = NewLIR0(kPseudoTargetLabel);
1267
1268      // Slow path to initialize the type.  Executed if the type is NULL.
1269      class SlowPath : public LIRSlowPath {
1270       public:
1271        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
1272                 const RegStorage class_reg) :
1273                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
1274                   class_reg_(class_reg) {
1275        }
1276
1277        void Compile() {
1278          GenerateTargetLabel();
1279
1280          // Call out to helper, which will return resolved type in kArg0
1281          // InitializeTypeFromCode(idx, method)
1282          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
1283                                        m2l_->TargetReg(kArg1), true);
1284          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
1285          m2l_->OpUnconditionalBranch(cont_);
1286        }
1287       private:
1288        const int type_idx_;
1289        const RegStorage class_reg_;
1290      };
1291
1292      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
1293    }
1294  }
1295  // At this point, class_reg (kArg2) has class
1296  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1297
1298  // Slow path for the case where the classes are not equal.  In this case we need
1299  // to call a helper function to do the check.
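  // The 'load' flag tells the slow path whether it must first load the object's class into
  // kArg1 (needed when we branch to it before the fast path has performed that load).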
1300  class SlowPath : public LIRSlowPath {
1301   public:
1302    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
1303               LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
1304    }
1305
1306    void Compile() {
1307      GenerateTargetLabel();
1308
1309      if (load_) {
1310        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
1311                           m2l_->TargetReg(kArg1));
1312      }
1313      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), m2l_->TargetReg(kArg2),
1314                                    m2l_->TargetReg(kArg1), true);
1315
1316      m2l_->OpUnconditionalBranch(cont_);
1317    }
1318
1319   private:
1320    bool load_;
1321  };
1322
1323  if (type_known_abstract) {
1324    // Easier case, run slow path if target is non-null (slow path will load from target)
1325    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
1326    LIR* cont = NewLIR0(kPseudoTargetLabel);
1327    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
1328  } else {
1329    // Harder, more common case.  We need to generate a forward branch over the load
1330    // if the target is null.  If it's non-null we perform the load and branch to the
1331    // slow path if the classes are not equal.
1332
1333    /* Null is OK - continue */
1334    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
1335    /* load object->klass_ */
1336    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1337    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
1338
1339    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
1340    LIR* cont = NewLIR0(kPseudoTargetLabel);
1341
1342    // Add the slow path; it does not perform the load since that was already done above.
1343    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));
1344
1345    // Set the null check to branch to the continuation.
1346    branch1->target = cont;
1347  }
1348}
1349
1350void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
1351                           RegLocation rl_src1, RegLocation rl_src2) {
1352  RegLocation rl_result;
1353  if (cu_->instruction_set == kThumb2) {
1354    /*
1355     * NOTE:  This is the one place in the code in which we might have
1356     * as many as six live temporary registers.  There are 5 in the normal
1357     * set for Arm.  Until we have spill capabilities, temporarily add
1358     * lr to the temp set.  It is safe to do this locally, but note that
1359     * lr is used explicitly elsewhere in the code generator and cannot
1360     * normally be used as a general temp register.
1361     */
1362    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
1363    FreeTemp(TargetReg(kLr));   // and make it available
1364  }
1365  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
1366  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1367  rl_result = EvalLoc(rl_dest, kCoreReg, true);
1368  // The longs may overlap; if the result's low reg aliases a source high reg, use a temp.
1369  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
1370    RegStorage t_reg = AllocTemp();
1371    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1372    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1373    OpRegCopy(rl_result.reg.GetLow(), t_reg);
1374    FreeTemp(t_reg);
1375  } else {
1376    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1377    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1378  }
1379  /*
1380   * NOTE: If rl_dest refers to a frame variable in a large frame, the
1381   * following StoreValueWide might need to allocate a temp register.
1382   * To further work around the lack of a spill capability, explicitly
1383   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
1384   * Remove when spill is functional.
1385   */
1386  FreeRegLocTemps(rl_result, rl_src1);
1387  FreeRegLocTemps(rl_result, rl_src2);
1388  StoreValueWide(rl_dest, rl_result);
1389  if (cu_->instruction_set == kThumb2) {
1390    Clobber(TargetReg(kLr));
1391    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
1392  }
1393}
1394
1395
1396void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
1397                             RegLocation rl_src1, RegLocation rl_shift) {
1398  ThreadOffset func_offset(-1);
1399
1400  switch (opcode) {
1401    case Instruction::SHL_LONG:
1402    case Instruction::SHL_LONG_2ADDR:
1403      func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
1404      break;
1405    case Instruction::SHR_LONG:
1406    case Instruction::SHR_LONG_2ADDR:
1407      func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
1408      break;
1409    case Instruction::USHR_LONG:
1410    case Instruction::USHR_LONG_2ADDR:
1411      func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
1412      break;
1413    default:
1414      LOG(FATAL) << "Unexpected case: " << opcode;
1415  }
1416  FlushAllRegs();   /* Send everything to home location */
1417  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
1418  RegLocation rl_result = GetReturnWide(false);
1419  StoreValueWide(rl_dest, rl_result);
1420}
1421
1422
1423void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
1424                            RegLocation rl_src1, RegLocation rl_src2) {
1425  DCHECK_NE(cu_->instruction_set, kX86);
1426  OpKind op = kOpBkpt;
1427  bool is_div_rem = false;
1428  bool check_zero = false;
1429  bool unary = false;
1430  RegLocation rl_result;
1431  bool shift_op = false;
1432  switch (opcode) {
1433    case Instruction::NEG_INT:
1434      op = kOpNeg;
1435      unary = true;
1436      break;
1437    case Instruction::NOT_INT:
1438      op = kOpMvn;
1439      unary = true;
1440      break;
1441    case Instruction::ADD_INT:
1442    case Instruction::ADD_INT_2ADDR:
1443      op = kOpAdd;
1444      break;
1445    case Instruction::SUB_INT:
1446    case Instruction::SUB_INT_2ADDR:
1447      op = kOpSub;
1448      break;
1449    case Instruction::MUL_INT:
1450    case Instruction::MUL_INT_2ADDR:
1451      op = kOpMul;
1452      break;
1453    case Instruction::DIV_INT:
1454    case Instruction::DIV_INT_2ADDR:
1455      check_zero = true;
1456      op = kOpDiv;
1457      is_div_rem = true;
1458      break;
1459    /* NOTE: returns in kArg1 */
1460    case Instruction::REM_INT:
1461    case Instruction::REM_INT_2ADDR:
1462      check_zero = true;
1463      op = kOpRem;
1464      is_div_rem = true;
1465      break;
1466    case Instruction::AND_INT:
1467    case Instruction::AND_INT_2ADDR:
1468      op = kOpAnd;
1469      break;
1470    case Instruction::OR_INT:
1471    case Instruction::OR_INT_2ADDR:
1472      op = kOpOr;
1473      break;
1474    case Instruction::XOR_INT:
1475    case Instruction::XOR_INT_2ADDR:
1476      op = kOpXor;
1477      break;
1478    case Instruction::SHL_INT:
1479    case Instruction::SHL_INT_2ADDR:
1480      shift_op = true;
1481      op = kOpLsl;
1482      break;
1483    case Instruction::SHR_INT:
1484    case Instruction::SHR_INT_2ADDR:
1485      shift_op = true;
1486      op = kOpAsr;
1487      break;
1488    case Instruction::USHR_INT:
1489    case Instruction::USHR_INT_2ADDR:
1490      shift_op = true;
1491      op = kOpLsr;
1492      break;
1493    default:
1494      LOG(FATAL) << "Invalid word arith op: " << opcode;
1495  }
1496  if (!is_div_rem) {
1497    if (unary) {
1498      rl_src1 = LoadValue(rl_src1, kCoreReg);
1499      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1500      OpRegReg(op, rl_result.reg, rl_src1.reg);
1501    } else {
1502      if (shift_op) {
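        // Only the low five bits of the shift count are significant, so mask the
        // register-held amount into a temp before the shift.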
1503        rl_src2 = LoadValue(rl_src2, kCoreReg);
1504        RegStorage t_reg = AllocTemp();
1505        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
1506        rl_src1 = LoadValue(rl_src1, kCoreReg);
1507        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1508        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
1509        FreeTemp(t_reg);
1510      } else {
1511        rl_src1 = LoadValue(rl_src1, kCoreReg);
1512        rl_src2 = LoadValue(rl_src2, kCoreReg);
1513        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1514        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
1515      }
1516    }
1517    StoreValue(rl_dest, rl_result);
1518  } else {
1519    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
1520    if (cu_->instruction_set == kMips) {
1521      rl_src1 = LoadValue(rl_src1, kCoreReg);
1522      rl_src2 = LoadValue(rl_src2, kCoreReg);
1523      if (check_zero) {
1524        GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
1525      }
1526      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1527      done = true;
1528    } else if (cu_->instruction_set == kThumb2) {
1529      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
1530        // Use ARM SDIV instruction for division.  For remainder we also need to
1531        // calculate using a MUL and subtract.
1532        rl_src1 = LoadValue(rl_src1, kCoreReg);
1533        rl_src2 = LoadValue(rl_src2, kCoreReg);
1534        if (check_zero) {
1535          GenImmedCheck(kCondEq, rl_src2.reg, 0, kThrowDivZero);
1536        }
1537        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1538        done = true;
1539      }
1540    }
1541
1542    // If we haven't already generated the code, use the callout function.
1543    if (!done) {
1544      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
1545      FlushAllRegs();   /* Send everything to home location */
1546      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
1547      RegStorage r_tgt = CallHelperSetup(func_offset);
1548      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
1549      if (check_zero) {
1550        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
1551      }
1552      // NOTE: callout here is not a safepoint.
1553      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
1554      if (op == kOpDiv)
1555        rl_result = GetReturn(false);
1556      else
1557        rl_result = GetReturnAlt();
1558    }
1559    StoreValue(rl_dest, rl_result);
1560  }
1561}
1562
1563/*
1564 * The following are the first-level codegen routines that analyze the format
1565 * of each bytecode then either dispatch special purpose codegen routines
1566 * or produce corresponding Thumb instructions directly.
1567 */
1568
1569// Returns true if no more than two bits are set in 'x'.
1570static bool IsPopCountLE2(unsigned int x) {
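// For example, x == 0b0101000 clears to 0b0100000 after "x &= x - 1", and the second clear
// yields 0, so the function returns true; with three or more bits set it returns false.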
1571  x &= x - 1;
1572  return (x & (x - 1)) == 0;
1573}
1574
1575// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
1576// and store the result in 'rl_dest'.
1577bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
1578                               RegLocation rl_src, RegLocation rl_dest, int lit) {
1579  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
1580    return false;
1581  }
1582  // No divide instruction for Arm, so check for more special cases
1583  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
1584    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
1585  }
1586  int k = LowestSetBit(lit);
1587  if (k >= 30) {
1588    // Avoid special cases.
1589    return false;
1590  }
1591  rl_src = LoadValue(rl_src, kCoreReg);
1592  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1593  if (is_div) {
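    // Signed division by 2^k rounds toward zero, so a bias of (2^k - 1) must be added to
    // negative dividends before the arithmetic shift.  In C-like terms (illustrative only,
    // assuming arithmetic right shift of signed values):
    //   bias   = (src >> 31) & (lit - 1);   // 0 for src >= 0, (2^k - 1) for src < 0
    //   result = (src + bias) >> k;         // e.g. -7 / 4 == -1
    // For lit == 2 the bias is just the sign bit, so the initial ASR is unnecessary.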
1594    RegStorage t_reg = AllocTemp();
1595    if (lit == 2) {
1596      // Division by 2 is by far the most common division by constant.
1597      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
1598      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1599      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1600    } else {
1601      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
1602      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
1603      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1604      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1605    }
1606  } else {
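    // Remainder for a power-of-two divisor uses the same bias (illustrative, C-like):
    //   t1 = (src >> 31) & (lit - 1);        // bias: 0 or (lit - 1)
    //   t2 = ((src + t1) & (lit - 1)) - t1;  // result takes the sign of src, e.g. -7 % 4 == -3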
1607    RegStorage t_reg1 = AllocTemp();
1608    RegStorage t_reg2 = AllocTemp();
1609    if (lit == 2) {
1610      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
1611      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1612      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1613      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1614    } else {
1615      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
1616      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
1617      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1618      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1619      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1620    }
1621  }
1622  StoreValue(rl_dest, rl_result);
1623  return true;
1624}
1625
1626// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
1627// and store the result in 'rl_dest'.
1628bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
1629  if (lit < 0) {
1630    return false;
1631  }
1632  if (lit == 0) {
1633    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1634    LoadConstant(rl_result.reg, 0);
1635    StoreValue(rl_dest, rl_result);
1636    return true;
1637  }
1638  if (lit == 1) {
1639    rl_src = LoadValue(rl_src, kCoreReg);
1640    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1641    OpRegCopy(rl_result.reg, rl_src.reg);
1642    StoreValue(rl_dest, rl_result);
1643    return true;
1644  }
1645  // There is RegRegRegShift on Arm, so check for more special cases.
1646  // TODO: disabled, need to handle case of "dest == src" properly.
1647  if (false && cu_->instruction_set == kThumb2) {
1648    return EasyMultiply(rl_src, rl_dest, lit);
1649  }
1650  // Can we simplify this multiplication?
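  // For example, lit == 8 becomes a single shift, lit == 10 (0b1010) becomes two shifts and
  // an add, and lit == 7 (== 8 - 1) becomes a shift followed by a subtract.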
1651  bool power_of_two = false;
1652  bool pop_count_le2 = false;
1653  bool power_of_two_minus_one = false;
1654  if (IsPowerOfTwo(lit)) {
1655    power_of_two = true;
1656  } else if (IsPopCountLE2(lit)) {
1657    pop_count_le2 = true;
1658  } else if (IsPowerOfTwo(lit + 1)) {
1659    power_of_two_minus_one = true;
1660  } else {
1661    return false;
1662  }
1663  rl_src = LoadValue(rl_src, kCoreReg);
1664  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1665  if (power_of_two) {
1666    // Shift.
1667    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
1668  } else if (pop_count_le2) {
1669    // Shift and add and shift.
1670    int first_bit = LowestSetBit(lit);
1671    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
1672    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
1673  } else {
1674    // Reverse subtract: (src << LowestSetBit(lit + 1)) - src == src * lit.
1675    DCHECK(power_of_two_minus_one);
1676    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
1677    RegStorage t_reg = AllocTemp();
1678    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
1679    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
1680  }
1681  StoreValue(rl_dest, rl_result);
1682  return true;
1683}
1684
1685void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
1686                               int lit) {
1687  RegLocation rl_result;
1688  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
1689  bool shift_op = false;
1690  bool is_div = false;
1691
1692  switch (opcode) {
1693    case Instruction::RSUB_INT_LIT8:
1694    case Instruction::RSUB_INT: {
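      // Reverse subtract computes (lit - src); without a native rsub, negate then add lit.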
1695      rl_src = LoadValue(rl_src, kCoreReg);
1696      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1697      if (cu_->instruction_set == kThumb2) {
1698        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
1699      } else {
1700        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
1701        OpRegImm(kOpAdd, rl_result.reg, lit);
1702      }
1703      StoreValue(rl_dest, rl_result);
1704      return;
1705    }
1706
1707    case Instruction::SUB_INT:
1708    case Instruction::SUB_INT_2ADDR:
1709      lit = -lit;
1710      // Intended fallthrough
1711    case Instruction::ADD_INT:
1712    case Instruction::ADD_INT_2ADDR:
1713    case Instruction::ADD_INT_LIT8:
1714    case Instruction::ADD_INT_LIT16:
1715      op = kOpAdd;
1716      break;
1717    case Instruction::MUL_INT:
1718    case Instruction::MUL_INT_2ADDR:
1719    case Instruction::MUL_INT_LIT8:
1720    case Instruction::MUL_INT_LIT16: {
1721      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
1722        return;
1723      }
1724      op = kOpMul;
1725      break;
1726    }
1727    case Instruction::AND_INT:
1728    case Instruction::AND_INT_2ADDR:
1729    case Instruction::AND_INT_LIT8:
1730    case Instruction::AND_INT_LIT16:
1731      op = kOpAnd;
1732      break;
1733    case Instruction::OR_INT:
1734    case Instruction::OR_INT_2ADDR:
1735    case Instruction::OR_INT_LIT8:
1736    case Instruction::OR_INT_LIT16:
1737      op = kOpOr;
1738      break;
1739    case Instruction::XOR_INT:
1740    case Instruction::XOR_INT_2ADDR:
1741    case Instruction::XOR_INT_LIT8:
1742    case Instruction::XOR_INT_LIT16:
1743      op = kOpXor;
1744      break;
1745    case Instruction::SHL_INT_LIT8:
1746    case Instruction::SHL_INT:
1747    case Instruction::SHL_INT_2ADDR:
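      // Dalvik shift semantics use only the low five bits of the shift count.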
1748      lit &= 31;
1749      shift_op = true;
1750      op = kOpLsl;
1751      break;
1752    case Instruction::SHR_INT_LIT8:
1753    case Instruction::SHR_INT:
1754    case Instruction::SHR_INT_2ADDR:
1755      lit &= 31;
1756      shift_op = true;
1757      op = kOpAsr;
1758      break;
1759    case Instruction::USHR_INT_LIT8:
1760    case Instruction::USHR_INT:
1761    case Instruction::USHR_INT_2ADDR:
1762      lit &= 31;
1763      shift_op = true;
1764      op = kOpLsr;
1765      break;
1766
1767    case Instruction::DIV_INT:
1768    case Instruction::DIV_INT_2ADDR:
1769    case Instruction::DIV_INT_LIT8:
1770    case Instruction::DIV_INT_LIT16:
1771    case Instruction::REM_INT:
1772    case Instruction::REM_INT_2ADDR:
1773    case Instruction::REM_INT_LIT8:
1774    case Instruction::REM_INT_LIT16: {
1775      if (lit == 0) {
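        // A literal divisor of zero always throws: kCondAl emits an unconditional branch to
        // the div-zero launchpad and no further code is generated for this op.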
1776        GenImmedCheck(kCondAl, RegStorage::InvalidReg(), 0, kThrowDivZero);
1777        return;
1778      }
1779      if ((opcode == Instruction::DIV_INT) ||
1780          (opcode == Instruction::DIV_INT_2ADDR) ||
1781          (opcode == Instruction::DIV_INT_LIT8) ||
1782          (opcode == Instruction::DIV_INT_LIT16)) {
1783        is_div = true;
1784      } else {
1785        is_div = false;
1786      }
1787      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
1788        return;
1789      }
1790
1791      bool done = false;
1792      if (cu_->instruction_set == kMips) {
1793        rl_src = LoadValue(rl_src, kCoreReg);
1794        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1795        done = true;
1796      } else if (cu_->instruction_set == kX86) {
1797        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
1798        done = true;
1799      } else if (cu_->instruction_set == kThumb2) {
1800        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
1801          // Use ARM SDIV instruction for division.  For remainder we also need to
1802          // calculate using a MUL and subtract.
1803          rl_src = LoadValue(rl_src, kCoreReg);
1804          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1805          done = true;
1806        }
1807      }
1808
1809      if (!done) {
1810        FlushAllRegs();   /* Everything to home location. */
1811        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
1812        Clobber(TargetReg(kArg0));
1813        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
1814        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
1815        if (is_div)
1816          rl_result = GetReturn(false);
1817        else
1818          rl_result = GetReturnAlt();
1819      }
1820      StoreValue(rl_dest, rl_result);
1821      return;
1822    }
1823    default:
1824      LOG(FATAL) << "Unexpected opcode " << opcode;
1825  }
1826  rl_src = LoadValue(rl_src, kCoreReg);
1827  rl_result = EvalLoc(rl_dest, kCoreReg, true);
1828  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
1829  if (shift_op && (lit == 0)) {
1830    OpRegCopy(rl_result.reg, rl_src.reg);
1831  } else {
1832    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
1833  }
1834  StoreValue(rl_dest, rl_result);
1835}
1836
1837void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
1838                             RegLocation rl_src1, RegLocation rl_src2) {
1839  RegLocation rl_result;
1840  OpKind first_op = kOpBkpt;
1841  OpKind second_op = kOpBkpt;
1842  bool call_out = false;
1843  bool check_zero = false;
1844  ThreadOffset func_offset(-1);
1845  int ret_reg = TargetReg(kRet0).GetReg();
1846
1847  switch (opcode) {
1848    case Instruction::NOT_LONG:
1849      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1850      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1851      // Check for destructive overlap
1852      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
1853        RegStorage t_reg = AllocTemp();
1854        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
1855        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1856        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
1857        FreeTemp(t_reg);
1858      } else {
1859        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1860        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
1861      }
1862      StoreValueWide(rl_dest, rl_result);
1863      return;
1864    case Instruction::ADD_LONG:
1865    case Instruction::ADD_LONG_2ADDR:
1866      if (cu_->instruction_set != kThumb2) {
1867        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
1868        return;
1869      }
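      // On Thumb2 a 64-bit add is emitted as an add of the low words followed by an
      // add-with-carry of the high words (see GenLong3Addr).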
1870      first_op = kOpAdd;
1871      second_op = kOpAdc;
1872      break;
1873    case Instruction::SUB_LONG:
1874    case Instruction::SUB_LONG_2ADDR:
1875      if (cu_->instruction_set != kThumb2) {
1876        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
1877        return;
1878      }
1879      first_op = kOpSub;
1880      second_op = kOpSbc;
1881      break;
1882    case Instruction::MUL_LONG:
1883    case Instruction::MUL_LONG_2ADDR:
1884      if (cu_->instruction_set != kMips) {
1885        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
1886        return;
1887      } else {
1888        call_out = true;
1889        ret_reg = TargetReg(kRet0).GetReg();
1890        func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
1891      }
1892      break;
1893    case Instruction::DIV_LONG:
1894    case Instruction::DIV_LONG_2ADDR:
1895      call_out = true;
1896      check_zero = true;
1897      ret_reg = TargetReg(kRet0).GetReg();
1898      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
1899      break;
1900    case Instruction::REM_LONG:
1901    case Instruction::REM_LONG_2ADDR:
1902      call_out = true;
1903      check_zero = true;
1904      func_offset = QUICK_ENTRYPOINT_OFFSET(pLmod);
1905      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
1906      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() : TargetReg(kRet0).GetReg();
1907      break;
1908    case Instruction::AND_LONG_2ADDR:
1909    case Instruction::AND_LONG:
1910      if (cu_->instruction_set == kX86) {
1911        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
1912      }
1913      first_op = kOpAnd;
1914      second_op = kOpAnd;
1915      break;
1916    case Instruction::OR_LONG:
1917    case Instruction::OR_LONG_2ADDR:
1918      if (cu_->instruction_set == kX86) {
1919        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
1920        return;
1921      }
1922      first_op = kOpOr;
1923      second_op = kOpOr;
1924      break;
1925    case Instruction::XOR_LONG:
1926    case Instruction::XOR_LONG_2ADDR:
1927      if (cu_->instruction_set == kX86) {
1928        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
1929        return;
1930      }
1931      first_op = kOpXor;
1932      second_op = kOpXor;
1933      break;
1934    case Instruction::NEG_LONG: {
1935      GenNegLong(rl_dest, rl_src2);
1936      return;
1937    }
1938    default:
1939      LOG(FATAL) << "Invalid long arith op: " << opcode;
1940  }
1941  if (!call_out) {
1942    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
1943  } else {
1944    FlushAllRegs();   /* Send everything to home location */
1945    if (check_zero) {
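      // Load the divisor pair (kArg2/kArg3) first so it can be zero-checked before the
      // dividend is loaded and the runtime helper is invoked.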
1946      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
1947      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
1948      LoadValueDirectWideFixed(rl_src2, r_tmp2);
1949      RegStorage r_tgt = CallHelperSetup(func_offset);
1950      GenDivZeroCheck(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
1951      LoadValueDirectWideFixed(rl_src1, r_tmp1);
1952      // NOTE: callout here is not a safepoint
1953      CallHelper(r_tgt, func_offset, false /* not safepoint */);
1954    } else {
1955      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
1956    }
1957    // Adjust return regs to handle the case of rem returning its result in kArg2/kArg3.
1958    if (ret_reg == TargetReg(kRet0).GetReg())
1959      rl_result = GetReturnWide(false);
1960    else
1961      rl_result = GetReturnWideAlt();
1962    StoreValueWide(rl_dest, rl_result);
1963  }
1964}
1965
1966void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
1967                                RegLocation rl_dest, RegLocation rl_src) {
1968  /*
1969   * Don't optimize the register usage since it calls out to support
1970   * functions
1971   */
1972  FlushAllRegs();   /* Send everything to home location */
1973  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
1974  if (rl_dest.wide) {
1975    RegLocation rl_result;
1976    rl_result = GetReturnWide(rl_dest.fp);
1977    StoreValueWide(rl_dest, rl_result);
1978  } else {
1979    RegLocation rl_result;
1980    rl_result = GetReturn(rl_dest.fp);
1981    StoreValue(rl_dest, rl_result);
1982  }
1983}
1984
1985/* Check if we need to check for pending suspend request */
1986void Mir2Lir::GenSuspendTest(int opt_flags) {
1987  if (Runtime::Current()->ExplicitSuspendChecks()) {
1988    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
1989      return;
1990    }
1991    FlushAllRegs();
1992    LIR* branch = OpTestSuspend(NULL);
1993    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
1994    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
1995                         current_dalvik_offset_);
1996    branch->target = target;
1997    suspend_launchpads_.Insert(target);
1998  } else {
1999    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2000      return;
2001    }
2002    FlushAllRegs();     // TODO: needed?
2003    LIR* inst = CheckSuspendUsingLoad();
2004    MarkSafepointPC(inst);
2005  }
2006}
2007
2008/* Check if we need to check for pending suspend request */
2009void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
2010  if (Runtime::Current()->ExplicitSuspendChecks()) {
2011    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2012      OpUnconditionalBranch(target);
2013      return;
2014    }
2015    OpTestSuspend(target);
2016    LIR* launch_pad =
2017        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
2018               current_dalvik_offset_);
2019    FlushAllRegs();
2020    OpUnconditionalBranch(launch_pad);
2021    suspend_launchpads_.Insert(launch_pad);
2022  } else {
2023    // For the implicit suspend check, just perform the trigger
2024    // load and branch to the target.
2025    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2026      OpUnconditionalBranch(target);
2027      return;
2028    }
2029    FlushAllRegs();
2030    LIR* inst = CheckSuspendUsingLoad();
2031    MarkSafepointPC(inst);
2032    OpUnconditionalBranch(target);
2033  }
2034}
2035
2036/* Call out to helper assembly routine that will null check obj and then lock it. */
2037void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
2038  FlushAllRegs();
2039  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pLockObject), rl_src, true);
2040}
2041
2042/* Call out to helper assembly routine that will null check obj and then unlock it. */
2043void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
2044  FlushAllRegs();
2045  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rl_src, true);
2046}
2047
2048/* Generic code for generating a wide constant into a VR. */
2049void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
2050  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
2051  LoadConstantWide(rl_result.reg, value);
2052  StoreValueWide(rl_dest, rl_result);
2053}
2054
2055}  // namespace art
2056