gen_common.cc revision e643a179cf5585ba6bafdd4fa51730d9f50c06f6
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#include "dex/compiler_ir.h"
17#include "dex/compiler_internals.h"
18#include "dex/quick/arm/arm_lir.h"
19#include "dex/quick/mir_to_lir-inl.h"
20#include "entrypoints/quick/quick_entrypoints.h"
21#include "mirror/array.h"
22#include "mirror/object-inl.h"
23#include "verifier/method_verifier.h"
24#include <functional>
25
26namespace art {
27
28/*
29 * This source file contains "gen" codegen routines that should
30 * be applicable to most targets.  Only mid-level support utilities
31 * and "op" calls may be used here.
32 */
33
34/*
35 * Generate a kPseudoBarrier marker to indicate the boundary of special
36 * blocks.
37 */
38void Mir2Lir::GenBarrier() {
39  LIR* barrier = NewLIR0(kPseudoBarrier);
40  /* Mark all resources as being clobbered */
41  DCHECK(!barrier->flags.use_def_invalid);
42  barrier->u.m.def_mask = ENCODE_ALL;
43}
44
45LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
46  LIR* tgt;
47  LIR* branch;
48  if (c_code == kCondAl) {
49    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
50                 imm_val);
51    branch = OpUnconditionalBranch(tgt);
52  } else {
53    tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
54    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
55  }
56  // Remember branch target - will process later
57  throw_launchpads_.Insert(tgt);
58  return branch;
59}
60
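// Divide-by-zero handling: GenDivZeroException throws unconditionally; the GenDivZeroCheck
// overloads branch to a shared slow path when the condition or register indicates a zero divisor.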
61void Mir2Lir::GenDivZeroException() {
62  LIR* branch = OpUnconditionalBranch(nullptr);
63  AddDivZeroCheckSlowPath(branch);
64}
65
66void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
67  LIR* branch = OpCondBranch(c_code, nullptr);
68  AddDivZeroCheckSlowPath(branch);
69}
70
71void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
72  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
73  AddDivZeroCheckSlowPath(branch);
74}
75
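// Attach a slow path that calls the pThrowDivZero entrypoint when the guarding branch is taken.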
76void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
77  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
78   public:
79    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
80        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
81    }
82
83    void Compile() OVERRIDE {
84      m2l_->ResetRegPool();
85      m2l_->ResetDefTracking();
86      GenerateTargetLabel();
87      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
88    }
89  };
90
91  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
92}
93
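// Explicit null check: branch to a slow path that calls pThrowNullPointer when reg is null.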
94LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
95  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
96   public:
97    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
98        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
99    }
100
101    void Compile() OVERRIDE {
102      m2l_->ResetRegPool();
103      m2l_->ResetDefTracking();
104      GenerateTargetLabel();
105      m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
106    }
107  };
108
109  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
110  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
111  return branch;
112}
113
114/* Perform null-check on a register.  */
115LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
116  if (Runtime::Current()->ExplicitNullChecks()) {
117    return GenExplicitNullCheck(m_reg, opt_flags);
118  }
119  return nullptr;
120}
121
122/* Perform an explicit null-check on a register.  */
123LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
124  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
125    return NULL;
126  }
127  return GenNullCheck(m_reg);
128}
129
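// With implicit null checks, record a safepoint on the last instruction so a fault on a null
// dereference can be mapped back to this dex pc.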
130void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
131  if (!Runtime::Current()->ExplicitNullChecks()) {
132    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
133      return;
134    }
135    MarkSafepointPC(last_lir_insn_);
136  }
137}
138
139void Mir2Lir::MarkPossibleStackOverflowException() {
140  if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
141    MarkSafepointPC(last_lir_insn_);
142  }
143}
144
145void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
146  if (!Runtime::Current()->ExplicitNullChecks()) {
147    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
148      return;
149    }
150    // Force an implicit null check by performing a memory operation (load) from the given
151    // register with offset 0.  This will cause a signal if the register contains 0 (null).
152    RegStorage tmp = AllocTemp();
153    // TODO: for Mips, would be best to use rZERO as the bogus register target.
154    LIR* load = LoadWordDisp(reg, 0, tmp);
155    FreeTemp(tmp);
156    MarkSafepointPC(load);
157  }
158}
159
160/* Perform check on two registers */
161LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
162                             ThrowKind kind) {
163  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
164                    reg2.GetReg());
165  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
166  // Remember branch target - will process later
167  throw_launchpads_.Insert(tgt);
168  return branch;
169}
170
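// Compare-and-branch for the IF_EQ..IF_LE opcodes, folding a cheap constant operand into a
// compare-immediate where possible.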
171void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
172                                  RegLocation rl_src2, LIR* taken,
173                                  LIR* fall_through) {
174  ConditionCode cond;
175  switch (opcode) {
176    case Instruction::IF_EQ:
177      cond = kCondEq;
178      break;
179    case Instruction::IF_NE:
180      cond = kCondNe;
181      break;
182    case Instruction::IF_LT:
183      cond = kCondLt;
184      break;
185    case Instruction::IF_GE:
186      cond = kCondGe;
187      break;
188    case Instruction::IF_GT:
189      cond = kCondGt;
190      break;
191    case Instruction::IF_LE:
192      cond = kCondLe;
193      break;
194    default:
195      cond = static_cast<ConditionCode>(0);
196      LOG(FATAL) << "Unexpected opcode " << opcode;
197  }
198
199  // Normalize such that if either operand is constant, src2 will be constant
200  if (rl_src1.is_const) {
201    RegLocation rl_temp = rl_src1;
202    rl_src1 = rl_src2;
203    rl_src2 = rl_temp;
204    cond = FlipComparisonOrder(cond);
205  }
206
207  rl_src1 = LoadValue(rl_src1, kCoreReg);
208  // Is this really an immediate comparison?
209  if (rl_src2.is_const) {
210    // If it's already live in a register or not easily materialized, just keep going
211    RegLocation rl_temp = UpdateLoc(rl_src2);
212    if ((rl_temp.location == kLocDalvikFrame) &&
213        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
214      // OK - convert this to a compare immediate and branch
215      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
216      return;
217    }
218  }
219  rl_src2 = LoadValue(rl_src2, kCoreReg);
220  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
221}
222
223void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
224                                      LIR* fall_through) {
225  ConditionCode cond;
226  rl_src = LoadValue(rl_src, kCoreReg);
227  switch (opcode) {
228    case Instruction::IF_EQZ:
229      cond = kCondEq;
230      break;
231    case Instruction::IF_NEZ:
232      cond = kCondNe;
233      break;
234    case Instruction::IF_LTZ:
235      cond = kCondLt;
236      break;
237    case Instruction::IF_GEZ:
238      cond = kCondGe;
239      break;
240    case Instruction::IF_GTZ:
241      cond = kCondGt;
242      break;
243    case Instruction::IF_LEZ:
244      cond = kCondLe;
245      break;
246    default:
247      cond = static_cast<ConditionCode>(0);
248      LOG(FATAL) << "Unexpected opcode " << opcode;
249  }
250  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
251}
252
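// int-to-long: copy or load the low word, then sign-extend it into the high word with an
// arithmetic shift right by 31.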
253void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
254  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
255  if (rl_src.location == kLocPhysReg) {
256    OpRegCopy(rl_result.reg, rl_src.reg);
257  } else {
258    LoadValueDirect(rl_src, rl_result.reg.GetLow());
259  }
260  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
261  StoreValueWide(rl_dest, rl_result);
262}
263
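// Narrowing conversions (int-to-byte/short/char) implemented as a single extending reg-to-reg op.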
264void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
265                              RegLocation rl_src) {
266  rl_src = LoadValue(rl_src, kCoreReg);
267  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
268  OpKind op = kOpInvalid;
269  switch (opcode) {
270    case Instruction::INT_TO_BYTE:
271      op = kOp2Byte;
272      break;
273    case Instruction::INT_TO_SHORT:
274       op = kOp2Short;
275       break;
276    case Instruction::INT_TO_CHAR:
277       op = kOp2Char;
278       break;
279    default:
280      LOG(ERROR) << "Bad int conversion type";
281  }
282  OpRegReg(op, rl_result.reg, rl_src.reg);
283  StoreValue(rl_dest, rl_result);
284}
285
286/*
287 * Let helper function take care of everything.  Will call
288 * Array::AllocFromCode(type_idx, method, count);
289 * Note: AllocFromCode will handle checks for errNegativeArraySize.
290 */
291void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
292                          RegLocation rl_src) {
293  FlushAllRegs();  /* Everything to home location */
294  ThreadOffset<4> func_offset(-1);
295  const DexFile* dex_file = cu_->dex_file;
296  CompilerDriver* driver = cu_->compiler_driver;
297  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file,
298                                                       type_idx)) {
299    bool is_type_initialized;  // Ignored as an array does not have an initializer.
300    bool use_direct_type_ptr;
301    uintptr_t direct_type_ptr;
302    if (kEmbedClassInCode &&
303        driver->CanEmbedTypeInCode(*dex_file, type_idx,
304                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
305      // The fast path.
306      if (!use_direct_type_ptr) {
307        LoadClassType(type_idx, kArg0);
308        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
309        CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true);
310      } else {
311        // Use the direct pointer.
312        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayResolved);
313        CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true);
314      }
315    } else {
316      // The slow path.
317      DCHECK_EQ(func_offset.Int32Value(), -1);
318      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArray);
319      CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
320    }
321    DCHECK_NE(func_offset.Int32Value(), -1);
322  } else {
323    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocArrayWithAccessCheck);
324    CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
325  }
326  RegLocation rl_result = GetReturn(false);
327  StoreValue(rl_dest, rl_result);
328}
329
330/*
331 * Similar to GenNewArray, but with post-allocation initialization.
332 * Verifier guarantees we're dealing with an array class.  Current
333 * code throws a runtime exception ("bad Filled array req") for 'D' and 'J',
334 * and an internal "unimplemented" error if the type is not 'L', '[' or 'I'.
335 */
336void Mir2Lir::GenFilledNewArray(CallInfo* info) {
337  int elems = info->num_arg_words;
338  int type_idx = info->index;
339  FlushAllRegs();  /* Everything to home location */
340  ThreadOffset<4> func_offset(-1);
341  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
342                                                       type_idx)) {
343    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArray);
344  } else {
345    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pCheckAndAllocArrayWithAccessCheck);
346  }
347  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
348  FreeTemp(TargetReg(kArg2));
349  FreeTemp(TargetReg(kArg1));
350  /*
351   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
352   * return region.  Because AllocFromCode placed the new array
353   * in kRet0, we'll just lock it into place.  When debugger support is
354   * added, it may be necessary to additionally copy all return
355   * values to a home location in thread-local storage
356   */
357  LockTemp(TargetReg(kRet0));
358
359  // TODO: use the correct component size, currently all supported types
360  // share array alignment with ints (see comment at head of function)
361  size_t component_size = sizeof(int32_t);
362
363  // Having a range of 0 is legal
364  if (info->is_range && (elems > 0)) {
365    /*
366     * Bit of ugliness here.  We're going to generate a mem copy loop
367     * on the register range, but it is possible that some regs
368     * in the range have been promoted.  This is unlikely, but
369     * before generating the copy, we'll just force a flush
370     * of any regs in the source range that have been promoted to
371     * home location.
372     */
373    for (int i = 0; i < elems; i++) {
374      RegLocation loc = UpdateLoc(info->args[i]);
375      if (loc.location == kLocPhysReg) {
376        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
377      }
378    }
379    /*
380     * TUNING note: generated code here could be much improved, but
381     * this is an uncommon operation and isn't especially performance
382     * critical.
383     */
384    RegStorage r_src = AllocTemp();
385    RegStorage r_dst = AllocTemp();
386    RegStorage r_idx = AllocTemp();
387    RegStorage r_val;
388    switch (cu_->instruction_set) {
389      case kThumb2:
390        r_val = TargetReg(kLr);
391        break;
392      case kX86:
393      case kX86_64:
394        FreeTemp(TargetReg(kRet0));
395        r_val = AllocTemp();
396        break;
397      case kMips:
398        r_val = AllocTemp();
399        break;
400      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
401    }
402    // Set up source pointer
403    RegLocation rl_first = info->args[0];
404    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
405    // Set up the target pointer
406    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
407                mirror::Array::DataOffset(component_size).Int32Value());
408    // Set up the loop counter (known to be > 0)
409    LoadConstant(r_idx, elems - 1);
410    // Generate the copy loop.  Going backwards for convenience
411    LIR* target = NewLIR0(kPseudoTargetLabel);
412    // Copy next element
413    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
414    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
415    FreeTemp(r_val);
416    OpDecAndBranch(kCondGe, r_idx, target);
417    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
418      // Restore the target pointer
419      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
420                  -mirror::Array::DataOffset(component_size).Int32Value());
421    }
422  } else if (!info->is_range) {
423    // TUNING: interleave
424    for (int i = 0; i < elems; i++) {
425      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
426      StoreBaseDisp(TargetReg(kRet0),
427                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
428                    rl_arg.reg, kWord);
429      // If the LoadValue caused a temp to be allocated, free it
430      if (IsTemp(rl_arg.reg)) {
431        FreeTemp(rl_arg.reg);
432      }
433    }
434  }
435  if (info->result.location != kLocInvalid) {
436    StoreValue(info->result, GetReturn(false /* not fp */));
437  }
438}
439
440//
441// Slow path to ensure a class is initialized for sget/sput.
442//
443class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
444 public:
445  StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
446                      RegStorage r_base) :
447    LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
448               storage_index_(storage_index), r_base_(r_base) {
449  }
450
451  void Compile() {
452    LIR* unresolved_target = GenerateTargetLabel();
453    uninit_->target = unresolved_target;
454    m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
455                               storage_index_, true);
456    // Copy helper's result into r_base, a no-op on all but MIPS.
457    m2l_->OpRegCopy(r_base_,  m2l_->TargetReg(kRet0));
458
459    m2l_->OpUnconditionalBranch(cont_);
460  }
461
462 private:
463  LIR* const uninit_;
464  const int storage_index_;
465  const RegStorage r_base_;
466};
467
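// Static field put (sput): the fast path stores through the resolved storage base, adding a
// class-initialization slow path when needed; otherwise fall back to the pSet*Static helpers.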
468void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
469                      bool is_object) {
470  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
471  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
472  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
473    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
474    RegStorage r_base;
475    if (field_info.IsReferrersClass()) {
476      // Fast path, static storage base is this method's class
477      RegLocation rl_method  = LoadCurrMethod();
478      r_base = AllocTemp();
479      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
480      if (IsTemp(rl_method.reg)) {
481        FreeTemp(rl_method.reg);
482      }
483    } else {
484      // Medium path, static storage base in a different class which requires checks that the other
485      // class is initialized.
486      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
487      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
488      // May do runtime call so everything to home locations.
489      FlushAllRegs();
490      // Using fixed register to sync with possible call to runtime support.
491      RegStorage r_method = TargetReg(kArg1);
492      LockTemp(r_method);
493      LoadCurrMethodDirect(r_method);
494      r_base = TargetReg(kArg0);
495      LockTemp(r_base);
496      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
497      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
498                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
499      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
500      if (!field_info.IsInitialized() &&
501          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
502        // Check if r_base is NULL or a not yet initialized class.
503
504        // The slow path is invoked if r_base is NULL or the class it points
505        // to is not initialized.
506        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
507        RegStorage r_tmp = TargetReg(kArg2);
508        LockTemp(r_tmp);
509        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
510                                          mirror::Class::StatusOffset().Int32Value(),
511                                          mirror::Class::kStatusInitialized, NULL);
512        LIR* cont = NewLIR0(kPseudoTargetLabel);
513
514        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
515                                                     field_info.StorageIndex(), r_base));
516
517        FreeTemp(r_tmp);
518      }
519      FreeTemp(r_method);
520    }
521    // r_base now holds static storage base
522    if (is_long_or_double) {
523      rl_src = LoadValueWide(rl_src, kAnyReg);
524    } else {
525      rl_src = LoadValue(rl_src, kAnyReg);
526    }
527    if (field_info.IsVolatile()) {
528      // There might have been a store before this volatile one so insert StoreStore barrier.
529      GenMemBarrier(kStoreStore);
530    }
531    if (is_long_or_double) {
532      StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
533    } else {
534      StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
535    }
536    if (field_info.IsVolatile()) {
537      // A load might follow the volatile store so insert a StoreLoad barrier.
538      GenMemBarrier(kStoreLoad);
539    }
540    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
541      MarkGCCard(rl_src.reg, r_base);
542    }
543    FreeTemp(r_base);
544  } else {
545    FlushAllRegs();  // Everything to home locations
546    ThreadOffset<4> setter_offset =
547        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Static)
548                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjStatic)
549                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Static));
550    CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
551  }
552}
553
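// Static field get (sget): mirrors GenSput, loading through the resolved storage base or
// calling the pGet*Static helpers.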
554void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
555                      bool is_long_or_double, bool is_object) {
556  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
557  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
558  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
559    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
560    RegStorage r_base;
561    if (field_info.IsReferrersClass()) {
562      // Fast path, static storage base is this method's class
563      RegLocation rl_method  = LoadCurrMethod();
564      r_base = AllocTemp();
565      LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
566    } else {
567      // Medium path, static storage base in a different class which requires checks that the other
568      // class is initialized
569      DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
570      // May do runtime call so everything to home locations.
571      FlushAllRegs();
572      // Using fixed register to sync with possible call to runtime support.
573      RegStorage r_method = TargetReg(kArg1);
574      LockTemp(r_method);
575      LoadCurrMethodDirect(r_method);
576      r_base = TargetReg(kArg0);
577      LockTemp(r_base);
578      LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
579      LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
580                   sizeof(int32_t*) * field_info.StorageIndex(), r_base);
581      // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
582      if (!field_info.IsInitialized() &&
583          (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
584        // Check if r_base is NULL or a not yet initialized class.
585
586        // The slow path is invoked if r_base is NULL or the class it points
587        // to is not initialized.
588        LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
589        RegStorage r_tmp = TargetReg(kArg2);
590        LockTemp(r_tmp);
591        LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
592                                          mirror::Class::StatusOffset().Int32Value(),
593                                          mirror::Class::kStatusInitialized, NULL);
594        LIR* cont = NewLIR0(kPseudoTargetLabel);
595
596        AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
597                                                     field_info.StorageIndex(), r_base));
598
599        FreeTemp(r_tmp);
600      }
601      FreeTemp(r_method);
602    }
603    // r_base now holds static storage base
604    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
605
606    if (is_long_or_double) {
607      LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
608    } else {
609      LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
610    }
611    FreeTemp(r_base);
612
613    if (field_info.IsVolatile()) {
614      // Without context sensitive analysis, we must issue the most conservative barriers.
615      // In this case, either a load or store may follow so we issue both barriers.
616      GenMemBarrier(kLoadLoad);
617      GenMemBarrier(kLoadStore);
618    }
619
620    if (is_long_or_double) {
621      StoreValueWide(rl_dest, rl_result);
622    } else {
623      StoreValue(rl_dest, rl_result);
624    }
625  } else {
626    FlushAllRegs();  // Everything to home locations
627    ThreadOffset<4> getterOffset =
628        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Static)
629                          :(is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
630                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjStatic)
631                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Static));
632    if (is_long_or_double) {
633      RegLocation rl_result = GetReturnWide(rl_dest.fp);
634      StoreValueWide(rl_dest, rl_result);
635    } else {
636      RegLocation rl_result = GetReturn(rl_dest.fp);
637      StoreValue(rl_dest, rl_result);
638    }
639  }
640}
641
642// Generate code for all slow paths.
643void Mir2Lir::HandleSlowPaths() {
644  int n = slow_paths_.Size();
645  for (int i = 0; i < n; ++i) {
646    LIRSlowPath* slowpath = slow_paths_.Get(i);
647    slowpath->Compile();
648  }
649  slow_paths_.Reset();
650}
651
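// Materialize deferred suspend-check launch pads: call pTestSuspend, then branch back to the
// resume label.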
652void Mir2Lir::HandleSuspendLaunchPads() {
653  int num_elems = suspend_launchpads_.Size();
654  ThreadOffset<4> helper_offset = QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend);
655  for (int i = 0; i < num_elems; i++) {
656    ResetRegPool();
657    ResetDefTracking();
658    LIR* lab = suspend_launchpads_.Get(i);
659    LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
660    current_dalvik_offset_ = lab->operands[1];
661    AppendLIR(lab);
662    RegStorage r_tgt = CallHelperSetup(helper_offset);
663    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
664    OpUnconditionalBranch(resume_lab);
665  }
666}
667
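// Materialize deferred throw launch pads, marshalling operands into kArg0/kArg1 as expected by
// the runtime throw entrypoints.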
668void Mir2Lir::HandleThrowLaunchPads() {
669  int num_elems = throw_launchpads_.Size();
670  for (int i = 0; i < num_elems; i++) {
671    ResetRegPool();
672    ResetDefTracking();
673    LIR* lab = throw_launchpads_.Get(i);
674    current_dalvik_offset_ = lab->operands[1];
675    AppendLIR(lab);
676    ThreadOffset<4> func_offset(-1);
677    int v1 = lab->operands[2];
678    int v2 = lab->operands[3];
679    const bool target_x86 = cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64;
680    switch (lab->operands[0]) {
681      case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
682        // v2 is the constant array index.  Arm/Mips pass the length in v1; x86 passes the array in v1 and reloads its length.
683        if (target_x86) {
684          OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
685                   mirror::Array::LengthOffset().Int32Value());
686        } else {
687          OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
688        }
689        // Make sure the following LoadConstant doesn't mess with kArg1.
690        LockTemp(TargetReg(kArg1));
691        LoadConstant(TargetReg(kArg0), v2);
692        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
693        break;
694      case kThrowArrayBounds:
695        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
696        if (v2 != TargetReg(kArg0).GetReg()) {
697          OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
698          if (target_x86) {
699            // x86 leaves the array pointer in v2, so load the array length that the handler expects
700            OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
701                     mirror::Array::LengthOffset().Int32Value());
702          } else {
703            OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
704          }
705        } else {
706          if (v1 == TargetReg(kArg1).GetReg()) {
707            // Swap v1 and v2, using kArg2 as a temp
708            OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
709            if (target_x86) {
710              // x86 leaves the array pointer in v2; load the array length that the handler expects
711              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
712                       mirror::Array::LengthOffset().Int32Value());
713            } else {
714              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
715            }
716            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
717          } else {
718            if (target_x86) {
719              // x86 leaves the array pointer in v2; load the array length that the handler expects
720              OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
721                       mirror::Array::LengthOffset().Int32Value());
722            } else {
723              OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
724            }
725            OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
726          }
727        }
728        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
729        break;
730      case kThrowNoSuchMethod:
731        OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
732        func_offset =
733          QUICK_ENTRYPOINT_OFFSET(4, pThrowNoSuchMethod);
734        break;
735      default:
736        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
737    }
738    ClobberCallerSave();
739    RegStorage r_tgt = CallHelperSetup(func_offset);
740    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
741  }
742}
743
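// Instance field get (iget): the fast path loads from the object at the field offset with a null
// check and any required volatile barriers; otherwise call the pGet*Instance helpers.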
744void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
745                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
746                      bool is_object) {
747  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
748  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
749  if (field_info.FastGet() && !SLOW_FIELD_PATH) {
750    RegLocation rl_result;
751    RegisterClass reg_class = oat_reg_class_by_size(size);
752    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
753    rl_obj = LoadValue(rl_obj, kCoreReg);
754    if (is_long_or_double) {
755      DCHECK(rl_dest.wide);
756      GenNullCheck(rl_obj.reg, opt_flags);
757      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
758        rl_result = EvalLoc(rl_dest, reg_class, true);
759        // FIXME?  duplicate null check?
760        GenNullCheck(rl_obj.reg, opt_flags);
761        LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
762                         rl_obj.s_reg_low);
763        MarkPossibleNullPointerException(opt_flags);
764        if (field_info.IsVolatile()) {
765          // Without context sensitive analysis, we must issue the most conservative barriers.
766          // In this case, either a load or store may follow so we issue both barriers.
767          GenMemBarrier(kLoadLoad);
768          GenMemBarrier(kLoadStore);
769        }
770      } else {
771        RegStorage reg_ptr = AllocTemp();
772        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
773        rl_result = EvalLoc(rl_dest, reg_class, true);
774        LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
775        MarkPossibleNullPointerException(opt_flags);
776        if (field_info.IsVolatile()) {
777          // Without context sensitive analysis, we must issue the most conservative barriers.
778          // In this case, either a load or store may follow so we issue both barriers.
779          GenMemBarrier(kLoadLoad);
780          GenMemBarrier(kLoadStore);
781        }
782        FreeTemp(reg_ptr);
783      }
784      StoreValueWide(rl_dest, rl_result);
785    } else {
786      rl_result = EvalLoc(rl_dest, reg_class, true);
787      GenNullCheck(rl_obj.reg, opt_flags);
788      LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
789                   rl_obj.s_reg_low);
790      MarkPossibleNullPointerException(opt_flags);
791      if (field_info.IsVolatile()) {
792        // Without context sensitive analysis, we must issue the most conservative barriers.
793        // In this case, either a load or store may follow so we issue both barriers.
794        GenMemBarrier(kLoadLoad);
795        GenMemBarrier(kLoadStore);
796      }
797      StoreValue(rl_dest, rl_result);
798    }
799  } else {
800    ThreadOffset<4> getterOffset =
801        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pGet64Instance)
802                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pGetObjInstance)
803                                       : QUICK_ENTRYPOINT_OFFSET(4, pGet32Instance));
804    CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
805    if (is_long_or_double) {
806      RegLocation rl_result = GetReturnWide(rl_dest.fp);
807      StoreValueWide(rl_dest, rl_result);
808    } else {
809      RegLocation rl_result = GetReturn(rl_dest.fp);
810      StoreValue(rl_dest, rl_result);
811    }
812  }
813}
814
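// Instance field put (iput): the fast path stores at the field offset with a null check, volatile
// barriers and a GC card mark for object stores; otherwise call the pSet*Instance helpers.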
815void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
816                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
817                      bool is_object) {
818  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
819  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
820  if (field_info.FastPut() && !SLOW_FIELD_PATH) {
821    RegisterClass reg_class = oat_reg_class_by_size(size);
822    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
823    rl_obj = LoadValue(rl_obj, kCoreReg);
824    if (is_long_or_double) {
825      rl_src = LoadValueWide(rl_src, kAnyReg);
826      GenNullCheck(rl_obj.reg, opt_flags);
827      RegStorage reg_ptr = AllocTemp();
828      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
829      if (field_info.IsVolatile()) {
830        // There might have been a store before this volatile one so insert StoreStore barrier.
831        GenMemBarrier(kStoreStore);
832      }
833      StoreBaseDispWide(reg_ptr, 0, rl_src.reg);
834      MarkPossibleNullPointerException(opt_flags);
835      if (field_info.IsVolatile()) {
836        // A load might follow the volatile store so insert a StoreLoad barrier.
837        GenMemBarrier(kStoreLoad);
838      }
839      FreeTemp(reg_ptr);
840    } else {
841      rl_src = LoadValue(rl_src, reg_class);
842      GenNullCheck(rl_obj.reg, opt_flags);
843      if (field_info.IsVolatile()) {
844        // There might have been a store before this volatile one so insert StoreStore barrier.
845        GenMemBarrier(kStoreStore);
846      }
847      StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
848      MarkPossibleNullPointerException(opt_flags);
849      if (field_info.IsVolatile()) {
850        // A load might follow the volatile store so insert a StoreLoad barrier.
851        GenMemBarrier(kStoreLoad);
852      }
853      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
854        MarkGCCard(rl_src.reg, rl_obj.reg);
855      }
856    }
857  } else {
858    ThreadOffset<4> setter_offset =
859        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(4, pSet64Instance)
860                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(4, pSetObjInstance)
861                                       : QUICK_ENTRYPOINT_OFFSET(4, pSet32Instance));
862    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
863                                               rl_obj, rl_src, true);
864  }
865}
866
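// aput-object is always handled by a runtime helper; the entrypoint chosen depends on which
// null and bounds checks are still required.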
867void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
868                             RegLocation rl_src) {
869  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
870  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
871      (opt_flags & MIR_IGNORE_NULL_CHECK));
872  ThreadOffset<4> helper = needs_range_check
873      ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithNullAndBoundCheck)
874                          : QUICK_ENTRYPOINT_OFFSET(4, pAputObjectWithBoundCheck))
875      : QUICK_ENTRYPOINT_OFFSET(4, pAputObject);
876  CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src, true);
877}
878
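// const-class: load the type from the dex cache, adding a resolution slow path unless the type
// is known to be present.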
879void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
880  RegLocation rl_method = LoadCurrMethod();
881  RegStorage res_reg = AllocTemp();
882  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
883  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
884                                                   *cu_->dex_file,
885                                                   type_idx)) {
886    // Call out to helper which resolves type and verifies access.
887    // Resolved type returned in kRet0.
888    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
889                            type_idx, rl_method.reg, true);
890    RegLocation rl_result = GetReturn(false);
891    StoreValue(rl_dest, rl_result);
892  } else {
893    // We don't need access checks, load type from dex cache
894    int32_t dex_cache_offset =
895        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
896    LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
897    int32_t offset_of_type =
898        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
899                          * type_idx);
900    LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
901    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
902        type_idx) || SLOW_TYPE_PATH) {
903      // Slow path, at runtime test if type is null and if so initialize
904      FlushAllRegs();
905      LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL);
906      LIR* cont = NewLIR0(kPseudoTargetLabel);
907
908      // Object to generate the slow path for class resolution.
909      class SlowPath : public LIRSlowPath {
910       public:
911        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
912                 const RegLocation& rl_method, const RegLocation& rl_result) :
913                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
914                   rl_method_(rl_method), rl_result_(rl_result) {
915        }
916
917        void Compile() {
918          GenerateTargetLabel();
919
920          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
921                                        rl_method_.reg, true);
922          m2l_->OpRegCopy(rl_result_.reg,  m2l_->TargetReg(kRet0));
923
924          m2l_->OpUnconditionalBranch(cont_);
925        }
926
927       private:
928        const int type_idx_;
929        const RegLocation rl_method_;
930        const RegLocation rl_result_;
931      };
932
933      // Add to list for future.
934      AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result));
935
936      StoreValue(rl_dest, rl_result);
937    } else {
938      // Fast path, we're done - just store result
939      StoreValue(rl_dest, rl_result);
940    }
941  }
942}
943
944void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
945  /* NOTE: Most strings should be available at compile time */
946  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
947                 (sizeof(mirror::String*) * string_idx);
948  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
949      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
950    // slow path, resolve string if not in dex cache
951    FlushAllRegs();
952    LockCallTemps();  // Using explicit registers
953
954    // If the Method* is already in a register, we can save a copy.
955    RegLocation rl_method = mir_graph_->GetMethodLoc();
956    RegStorage r_method;
957    if (rl_method.location == kLocPhysReg) {
958      // A temp would conflict with register use below.
959      DCHECK(!IsTemp(rl_method.reg));
960      r_method = rl_method.reg;
961    } else {
962      r_method = TargetReg(kArg2);
963      LoadCurrMethodDirect(r_method);
964    }
965    LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
966                 TargetReg(kArg0));
967
968    // Might call out to helper, which will return resolved string in kRet0
969    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
970    if (cu_->instruction_set == kThumb2 ||
971        cu_->instruction_set == kMips) {
972      //  OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
973      LoadConstant(TargetReg(kArg1), string_idx);
974      LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0), 0, NULL);
975      LIR* cont = NewLIR0(kPseudoTargetLabel);
976      GenBarrier();
977
978      // Object to generate the slow path for string resolution.
979      class SlowPath : public LIRSlowPath {
980       public:
981        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method) :
982          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), r_method_(r_method) {
983        }
984
985        void Compile() {
986          GenerateTargetLabel();
987
988          RegStorage r_tgt = m2l_->CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pResolveString));
989
990          m2l_->OpRegCopy(m2l_->TargetReg(kArg0), r_method_);   // .eq
991          LIR* call_inst = m2l_->OpReg(kOpBlx, r_tgt);
992          m2l_->MarkSafepointPC(call_inst);
993          m2l_->FreeTemp(r_tgt);
994
995          m2l_->OpUnconditionalBranch(cont_);
996        }
997
998       private:
999         RegStorage r_method_;
1000      };
1001
1002      // Add to list for future.
1003      AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
1004    } else {
1005      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1006      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
1007      LoadConstant(TargetReg(kArg1), string_idx);
1008      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method, TargetReg(kArg1),
1009                              true);
1010      LIR* target = NewLIR0(kPseudoTargetLabel);
1011      branch->target = target;
1012    }
1013    GenBarrier();
1014    StoreValue(rl_dest, GetReturn(false));
1015  } else {
1016    RegLocation rl_method = LoadCurrMethod();
1017    RegStorage res_reg = AllocTemp();
1018    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1019    LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
1020    LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
1021    StoreValue(rl_dest, rl_result);
1022  }
1023}
1024
1025/*
1026 * Let helper function take care of everything.  Will
1027 * call Class::NewInstanceFromCode(type_idx, method);
1028 */
1029void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
1030  FlushAllRegs();  /* Everything to home location */
1031  // alloc will always check for resolution; do we also need to verify
1032  // access because the verifier was unable to?
1033  ThreadOffset<4> func_offset(-1);
1034  const DexFile* dex_file = cu_->dex_file;
1035  CompilerDriver* driver = cu_->compiler_driver;
1036  if (driver->CanAccessInstantiableTypeWithoutChecks(
1037      cu_->method_idx, *dex_file, type_idx)) {
1038    bool is_type_initialized;
1039    bool use_direct_type_ptr;
1040    uintptr_t direct_type_ptr;
1041    if (kEmbedClassInCode &&
1042        driver->CanEmbedTypeInCode(*dex_file, type_idx,
1043                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
1044      // The fast path.
1045      if (!use_direct_type_ptr) {
1046        LoadClassType(type_idx, kArg0);
1047        if (!is_type_initialized) {
1048          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
1049          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
1050        } else {
1051          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
1052          CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
1053        }
1054      } else {
1055        // Use the direct pointer.
1056        if (!is_type_initialized) {
1057          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectResolved);
1058          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
1059        } else {
1060          func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectInitialized);
1061          CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
1062        }
1063      }
1064    } else {
1065      // The slow path.
1066      DCHECK_EQ(func_offset.Int32Value(), -1);
1067      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObject);
1068      CallRuntimeHelperImmMethod(func_offset, type_idx, true);
1069    }
1070    DCHECK_NE(func_offset.Int32Value(), -1);
1071  } else {
1072    func_offset = QUICK_ENTRYPOINT_OFFSET(4, pAllocObjectWithAccessCheck);
1073    CallRuntimeHelperImmMethod(func_offset, type_idx, true);
1074  }
1075  RegLocation rl_result = GetReturn(false);
1076  StoreValue(rl_dest, rl_result);
1077}
1078
1079void Mir2Lir::GenThrow(RegLocation rl_src) {
1080  FlushAllRegs();
1081  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
1082}
1083
1084// For final classes there are no sub-classes to check and so we can answer the instance-of
1085// question with simple comparisons.
1086void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
1087                                 RegLocation rl_src) {
1088  // X86 has its own implementation.
1089  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
1090
1091  RegLocation object = LoadValue(rl_src, kCoreReg);
1092  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1093  RegStorage result_reg = rl_result.reg;
1094  if (result_reg == object.reg) {
1095    result_reg = AllocTypedTemp(false, kCoreReg);
1096  }
1097  LoadConstant(result_reg, 0);     // assume false
1098  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
1099
1100  RegStorage check_class = AllocTypedTemp(false, kCoreReg);
1101  RegStorage object_class = AllocTypedTemp(false, kCoreReg);
1102
1103  LoadCurrMethodDirect(check_class);
1104  if (use_declaring_class) {
1105    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
1106    LoadWordDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
1107  } else {
1108    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1109                 check_class);
1110    LoadWordDisp(object.reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
1111    int32_t offset_of_type =
1112      mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
1113      (sizeof(mirror::Class*) * type_idx);
1114    LoadWordDisp(check_class, offset_of_type, check_class);
1115  }
1116
1117  LIR* ne_branchover = NULL;
1118  if (cu_->instruction_set == kThumb2) {
1119    OpRegReg(kOpCmp, check_class, object_class);  // Same?
1120    LIR* it = OpIT(kCondEq, "");   // if-convert the test
1121    LoadConstant(result_reg, 1);     // .eq case - load true
1122    OpEndIT(it);
1123  } else {
1124    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
1125    LoadConstant(result_reg, 1);     // eq case - load true
1126  }
1127  LIR* target = NewLIR0(kPseudoTargetLabel);
1128  null_branchover->target = target;
1129  if (ne_branchover != NULL) {
1130    ne_branchover->target = target;
1131  }
1132  FreeTemp(object_class);
1133  FreeTemp(check_class);
1134  if (IsTemp(result_reg)) {
1135    OpRegCopy(rl_result.reg, result_reg);
1136    FreeTemp(result_reg);
1137  }
1138  StoreValue(rl_dest, rl_result);
1139}
1140
1141void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
1142                                         bool type_known_abstract, bool use_declaring_class,
1143                                         bool can_assume_type_is_in_dex_cache,
1144                                         uint32_t type_idx, RegLocation rl_dest,
1145                                         RegLocation rl_src) {
1146  // X86 has its own implementation.
1147  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
1148
1149  FlushAllRegs();
1150  // May generate a call - use explicit registers
1151  LockCallTemps();
1152  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
1153  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
1154  if (needs_access_check) {
1155    // Check we have access to type_idx and if not throw IllegalAccessError,
1156    // returns Class* in kRet0
1157    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
1158                         type_idx, true);
1159    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
1160    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1161  } else if (use_declaring_class) {
1162    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1163    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
1164                 class_reg);
1165  } else {
1166    // Load dex cache entry into class_reg (kArg2)
1167    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1168    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1169                 class_reg);
1170    int32_t offset_of_type =
1171        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
1172        * type_idx);
1173    LoadWordDisp(class_reg, offset_of_type, class_reg);
1174    if (!can_assume_type_is_in_dex_cache) {
1175      // Need to test presence of type in dex cache at runtime
1176      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
1177      // Not resolved
1178      // Call out to helper, which will return resolved type in kRet0
1179      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
1180      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
1181      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
1182      // Rejoin code paths
1183      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
1184      hop_branch->target = hop_target;
1185    }
1186  }
1187  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
1188  RegLocation rl_result = GetReturn(false);
1189  if (cu_->instruction_set == kMips) {
1190    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
1191    LoadConstant(rl_result.reg, 0);
1192  }
1193  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
1194
1195  /* load object->klass_ */
1196  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1197  LoadWordDisp(TargetReg(kArg0),  mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
1198  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
1199  LIR* branchover = NULL;
1200  if (type_known_final) {
1201    // rl_result == ref == null == 0.
1202    if (cu_->instruction_set == kThumb2) {
1203      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
1204      LIR* it = OpIT(kCondEq, "E");   // if-convert the test
1205      LoadConstant(rl_result.reg, 1);     // .eq case - load true
1206      LoadConstant(rl_result.reg, 0);     // .ne case - load false
1207      OpEndIT(it);
1208    } else {
1209      LoadConstant(rl_result.reg, 0);     // ne case - load false
1210      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
1211      LoadConstant(rl_result.reg, 1);     // eq case - load true
1212    }
1213  } else {
1214    if (cu_->instruction_set == kThumb2) {
1215      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
1216      LIR* it = nullptr;
1217      if (!type_known_abstract) {
1218        /* Uses conditional nullification */
1219        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
1220        it = OpIT(kCondEq, "EE");   // if-convert the test
1221        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
1222      }
1223      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
1224      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1225      if (it != nullptr) {
1226        OpEndIT(it);
1227      }
1228      FreeTemp(r_tgt);
1229    } else {
1230      if (!type_known_abstract) {
1231        /* Uses branchovers */
1232        LoadConstant(rl_result.reg, 1);     // assume true
1233        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
1234      }
1235      RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
1236      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
1237      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
1238      FreeTemp(r_tgt);
1239    }
1240  }
1241  // TODO: only clobber when type isn't final?
1242  ClobberCallerSave();
1243  /* branch targets here */
1244  LIR* target = NewLIR0(kPseudoTargetLabel);
1245  StoreValue(rl_dest, rl_result);
1246  branch1->target = target;
1247  if (branchover != NULL) {
1248    branchover->target = target;
1249  }
1250}
1251
1252void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
1253  bool type_known_final, type_known_abstract, use_declaring_class;
1254  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1255                                                                              *cu_->dex_file,
1256                                                                              type_idx,
1257                                                                              &type_known_final,
1258                                                                              &type_known_abstract,
1259                                                                              &use_declaring_class);
1260  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
1261      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
1262
1263  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
1264    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
1265  } else {
1266    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
1267                               use_declaring_class, can_assume_type_is_in_dex_cache,
1268                               type_idx, rl_dest, rl_src);
1269  }
1270}
1271
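// check-cast: resolve the target class (with slow paths for unresolved types), then compare the
// object's class and call a helper when they differ.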
1272void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
1273  bool type_known_final, type_known_abstract, use_declaring_class;
1274  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
1275                                                                              *cu_->dex_file,
1276                                                                              type_idx,
1277                                                                              &type_known_final,
1278                                                                              &type_known_abstract,
1279                                                                              &use_declaring_class);
1280  // Note: type_known_final is currently unused; exploiting it would only improve the
1281  // performance of the exception throw path.
1282  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
1283  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
1284    // Verifier type analysis proved this check cast would never cause an exception.
1285    return;
1286  }
1287  FlushAllRegs();
1288  // May generate a call - use explicit registers
1289  LockCallTemps();
1290  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
1291  RegStorage class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
1292  if (needs_access_check) {
1293    // Check that we have access to type_idx; if not, IllegalAccessError is thrown.
1294    // The helper returns the resolved Class* in kRet0.
1295    // InitializeTypeAndVerifyAccess(idx, method)
1296    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
1297                            type_idx, TargetReg(kArg1), true);
1298    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
1299  } else if (use_declaring_class) {
1300    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
1301                 class_reg);
1302  } else {
1303    // Load dex cache entry into class_reg (kArg2)
1304    LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
1305                 class_reg);
1306    int32_t offset_of_type =
1307        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
1308        (sizeof(mirror::Class*) * type_idx);
1309    LoadWordDisp(class_reg, offset_of_type, class_reg);
1310    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
1311      // Need to test presence of type in dex cache at runtime
1312      LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
1313      LIR* cont = NewLIR0(kPseudoTargetLabel);
1314
1315      // Slow path to initialize the type.  Executed if the type is NULL.
1316      class SlowPath : public LIRSlowPath {
1317       public:
1318        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
1319                 const RegStorage class_reg) :
1320                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
1321                   class_reg_(class_reg) {
1322        }
1323
1324        void Compile() OVERRIDE {
1325          GenerateTargetLabel();
1326
1327          // Call out to helper, which will return the resolved type in kRet0
1328          // InitializeTypeFromCode(idx, method)
1329          m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
1330                                        m2l_->TargetReg(kArg1), true);
1331          m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0));  // Align usage with fast path
1332          m2l_->OpUnconditionalBranch(cont_);
1333        }
1334       private:
1335        const int type_idx_;
1336        const RegStorage class_reg_;
1337      };
1338
1339      AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg));
1340    }
1341  }
1342  // At this point, class_reg (kArg2) has class
1343  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
1344
1345  // Slow path for the case where the classes are not equal.  In this case we need
1346  // to call a helper function to do the check.
1347  class SlowPath : public LIRSlowPath {
1348   public:
1349    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load):
1350               LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) {
1351    }
1352
1353    void Compile() OVERRIDE {
1354      GenerateTargetLabel();
1355
1356      if (load_) {
1357        m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
1358                           m2l_->TargetReg(kArg1));
1359      }
1360      m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
1361                                    m2l_->TargetReg(kArg1), true);
1362
1363      m2l_->OpUnconditionalBranch(cont_);
1364    }
1365
1366   private:
1367    bool load_;
1368  };
1369
1370  if (type_known_abstract) {
1371    // Easier case, run slow path if target is non-null (slow path will load from target)
1372    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0), 0, NULL);
1373    LIR* cont = NewLIR0(kPseudoTargetLabel);
1374    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
1375  } else {
1376    // Harder, more common case.  We need to generate a forward branch over the load
1377    // if the target is null.  If it's non-null we perform the load and branch to the
1378    // slow path if the classes are not equal.
1379
1380    /* Null is OK - continue */
1381    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
1382    /* load object->klass_ */
1383    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
1384    LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
1385
1386    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
1387    LIR* cont = NewLIR0(kPseudoTargetLabel);
1388
1389    // Add the slow path that does not perform the load, since it has already been done.
1390    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));
1391
1392    // Set the null check to branch to the continuation.
1393    branch1->target = cont;
1394  }
1395}
1396
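/*
 * Generate a long (64-bit) arithmetic op on register pairs: first_op is applied to
 * the low words and second_op to the high words, so e.g. ADD_LONG on Thumb2 is
 * emitted as add(lo) followed by adc(hi), and SUB_LONG as sub(lo) followed by sbc(hi).
 */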
1397void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
1398                           RegLocation rl_src1, RegLocation rl_src2) {
1399  RegLocation rl_result;
1400  if (cu_->instruction_set == kThumb2) {
1401    /*
1402     * NOTE:  This is the one place in the code in which we might have
1403     * as many as six live temporary registers.  There are 5 in the normal
1404     * set for Arm.  Until we have spill capabilities, temporarily add
1405     * lr to the temp set.  It is safe to do this locally, but note that
1406     * lr is used explicitly elsewhere in the code generator and cannot
1407     * normally be used as a general temp register.
1408     */
1409    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
1410    FreeTemp(TargetReg(kLr));   // and make it available
1411  }
1412  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
1413  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1414  rl_result = EvalLoc(rl_dest, kCoreReg, true);
1415  // The longs may overlap - use intermediate temp if so
1416  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
1417    RegStorage t_reg = AllocTemp();
1418    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1419    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1420    OpRegCopy(rl_result.reg.GetLow(), t_reg);
1421    FreeTemp(t_reg);
1422  } else {
1423    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
1424    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
1425  }
1426  /*
1427   * NOTE: If rl_dest refers to a frame variable in a large frame, the
1428   * following StoreValueWide might need to allocate a temp register.
1429   * To further work around the lack of a spill capability, explicitly
1430   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
1431   * Remove when spill is functional.
1432   */
1433  FreeRegLocTemps(rl_result, rl_src1);
1434  FreeRegLocTemps(rl_result, rl_src2);
1435  StoreValueWide(rl_dest, rl_result);
1436  if (cu_->instruction_set == kThumb2) {
1437    Clobber(TargetReg(kLr));
1438    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
1439  }
1440}
1441
1442
1443void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
1444                             RegLocation rl_src1, RegLocation rl_shift) {
1445  ThreadOffset<4> func_offset(-1);
1446
1447  switch (opcode) {
1448    case Instruction::SHL_LONG:
1449    case Instruction::SHL_LONG_2ADDR:
1450      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShlLong);
1451      break;
1452    case Instruction::SHR_LONG:
1453    case Instruction::SHR_LONG_2ADDR:
1454      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pShrLong);
1455      break;
1456    case Instruction::USHR_LONG:
1457    case Instruction::USHR_LONG_2ADDR:
1458      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pUshrLong);
1459      break;
1460    default:
1461      LOG(FATAL) << "Unexpected long shift op: " << opcode;
1462  }
1463  FlushAllRegs();   /* Send everything to home location */
1464  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
1465  RegLocation rl_result = GetReturnWide(false);
1466  StoreValueWide(rl_dest, rl_result);
1467}
1468
1469
1470void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
1471                            RegLocation rl_src1, RegLocation rl_src2) {
1472  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
1473  OpKind op = kOpBkpt;
1474  bool is_div_rem = false;
1475  bool check_zero = false;
1476  bool unary = false;
1477  RegLocation rl_result;
1478  bool shift_op = false;
1479  switch (opcode) {
1480    case Instruction::NEG_INT:
1481      op = kOpNeg;
1482      unary = true;
1483      break;
1484    case Instruction::NOT_INT:
1485      op = kOpMvn;
1486      unary = true;
1487      break;
1488    case Instruction::ADD_INT:
1489    case Instruction::ADD_INT_2ADDR:
1490      op = kOpAdd;
1491      break;
1492    case Instruction::SUB_INT:
1493    case Instruction::SUB_INT_2ADDR:
1494      op = kOpSub;
1495      break;
1496    case Instruction::MUL_INT:
1497    case Instruction::MUL_INT_2ADDR:
1498      op = kOpMul;
1499      break;
1500    case Instruction::DIV_INT:
1501    case Instruction::DIV_INT_2ADDR:
1502      check_zero = true;
1503      op = kOpDiv;
1504      is_div_rem = true;
1505      break;
1506    /* NOTE: returns in kArg1 */
1507    case Instruction::REM_INT:
1508    case Instruction::REM_INT_2ADDR:
1509      check_zero = true;
1510      op = kOpRem;
1511      is_div_rem = true;
1512      break;
1513    case Instruction::AND_INT:
1514    case Instruction::AND_INT_2ADDR:
1515      op = kOpAnd;
1516      break;
1517    case Instruction::OR_INT:
1518    case Instruction::OR_INT_2ADDR:
1519      op = kOpOr;
1520      break;
1521    case Instruction::XOR_INT:
1522    case Instruction::XOR_INT_2ADDR:
1523      op = kOpXor;
1524      break;
1525    case Instruction::SHL_INT:
1526    case Instruction::SHL_INT_2ADDR:
1527      shift_op = true;
1528      op = kOpLsl;
1529      break;
1530    case Instruction::SHR_INT:
1531    case Instruction::SHR_INT_2ADDR:
1532      shift_op = true;
1533      op = kOpAsr;
1534      break;
1535    case Instruction::USHR_INT:
1536    case Instruction::USHR_INT_2ADDR:
1537      shift_op = true;
1538      op = kOpLsr;
1539      break;
1540    default:
1541      LOG(FATAL) << "Invalid word arith op: " << opcode;
1542  }
1543  if (!is_div_rem) {
1544    if (unary) {
1545      rl_src1 = LoadValue(rl_src1, kCoreReg);
1546      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1547      OpRegReg(op, rl_result.reg, rl_src1.reg);
1548    } else {
1549      if (shift_op) {
1550        rl_src2 = LoadValue(rl_src2, kCoreReg);
1551        RegStorage t_reg = AllocTemp();
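        // Dalvik, like Java, only uses the low five bits of an int shift amount.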
1552        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
1553        rl_src1 = LoadValue(rl_src1, kCoreReg);
1554        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1555        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
1556        FreeTemp(t_reg);
1557      } else {
1558        rl_src1 = LoadValue(rl_src1, kCoreReg);
1559        rl_src2 = LoadValue(rl_src2, kCoreReg);
1560        rl_result = EvalLoc(rl_dest, kCoreReg, true);
1561        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
1562      }
1563    }
1564    StoreValue(rl_dest, rl_result);
1565  } else {
1566    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
1567    if (cu_->instruction_set == kMips) {
1568      rl_src1 = LoadValue(rl_src1, kCoreReg);
1569      rl_src2 = LoadValue(rl_src2, kCoreReg);
1570      if (check_zero) {
1571        GenDivZeroCheck(rl_src2.reg);
1572      }
1573      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1574      done = true;
1575    } else if (cu_->instruction_set == kThumb2) {
1576      if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
1577        // Use ARM SDIV instruction for division.  For remainder we also need to
1578        // calculate using a MUL and subtract.
1579        rl_src1 = LoadValue(rl_src1, kCoreReg);
1580        rl_src2 = LoadValue(rl_src2, kCoreReg);
1581        if (check_zero) {
1582            GenDivZeroCheck(rl_src2.reg);
1583        }
1584        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
1585        done = true;
1586      }
1587    }
1588
1589    // If we haven't already generated the code, use the callout function.
1590    if (!done) {
1591      ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
1592      FlushAllRegs();   /* Send everything to home location */
1593      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
1594      RegStorage r_tgt = CallHelperSetup(func_offset);
1595      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
1596      if (check_zero) {
1597        GenDivZeroCheck(TargetReg(kArg1));
1598      }
1599      // NOTE: callout here is not a safepoint.
1600      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
1601      if (op == kOpDiv)
1602        rl_result = GetReturn(false);
1603      else
1604        rl_result = GetReturnAlt();
1605    }
1606    StoreValue(rl_dest, rl_result);
1607  }
1608}
1609
1610/*
1611 * The following are the first-level codegen routines that analyze the format
1612 * of each bytecode and then either dispatch special-purpose codegen routines
1613 * or produce corresponding Thumb instructions directly.
1614 */
1615
1616// Returns true if no more than two bits are set in 'x'.
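// Clearing the lowest set bit twice (x &= x - 1) leaves zero iff at most two bits
// were set, e.g. 0b0101 -> 0b0100 -> 0, while 0b0111 -> 0b0110 -> 0b0100 != 0.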
1617static bool IsPopCountLE2(unsigned int x) {
1618  x &= x - 1;
1619  return (x & (x - 1)) == 0;
1620}
1621
1622// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
1623// and store the result in 'rl_dest'.
1624bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
1625                               RegLocation rl_src, RegLocation rl_dest, int lit) {
1626  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
1627    return false;
1628  }
1629  // No divide instruction for Arm, so check for more special cases
1630  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
1631    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
1632  }
1633  int k = LowestSetBit(lit);
1634  if (k >= 30) {
1635    // Avoid special cases.
1636    return false;
1637  }
1638  rl_src = LoadValue(rl_src, kCoreReg);
1639  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
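  /*
   * Signed division by 2^k must round toward zero, so negative dividends are biased
   * by (2^k - 1) before the arithmetic shift.  The bias is built from the sign bit:
   * for k == 2 and src == -5, (src >> 31) is -1, shifting that logically right by
   * (32 - k) gives 3, and (-5 + 3) >> 2 == -1, matching Java's -5 / 4.  The remainder
   * path applies the same bias, masks with (lit - 1), then subtracts the bias back out.
   */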
1640  if (is_div) {
1641    RegStorage t_reg = AllocTemp();
1642    if (lit == 2) {
1643      // Division by 2 is by far the most common division by constant.
1644      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
1645      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1646      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1647    } else {
1648      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
1649      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
1650      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
1651      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
1652    }
1653  } else {
1654    RegStorage t_reg1 = AllocTemp();
1655    RegStorage t_reg2 = AllocTemp();
1656    if (lit == 2) {
1657      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
1658      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1659      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1660      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1661    } else {
1662      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
1663      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
1664      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
1665      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
1666      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
1667    }
1668  }
1669  StoreValue(rl_dest, rl_result);
1670  return true;
1671}
1672
1673// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
1674// and store the result in 'rl_dest'.
1675bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
1676  if (lit < 0) {
1677    return false;
1678  }
1679  if (lit == 0) {
1680    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1681    LoadConstant(rl_result.reg, 0);
1682    StoreValue(rl_dest, rl_result);
1683    return true;
1684  }
1685  if (lit == 1) {
1686    rl_src = LoadValue(rl_src, kCoreReg);
1687    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1688    OpRegCopy(rl_result.reg, rl_src.reg);
1689    StoreValue(rl_dest, rl_result);
1690    return true;
1691  }
1692  // There is RegRegRegShift on Arm, so check for more special cases
1693  if (cu_->instruction_set == kThumb2) {
1694    return EasyMultiply(rl_src, rl_dest, lit);
1695  }
1696  // Can we simplify this multiplication?
1697  bool power_of_two = false;
1698  bool pop_count_le2 = false;
1699  bool power_of_two_minus_one = false;
1700  if (IsPowerOfTwo(lit)) {
1701    power_of_two = true;
1702  } else if (IsPopCountLE2(lit)) {
1703    pop_count_le2 = true;
1704  } else if (IsPowerOfTwo(lit + 1)) {
1705    power_of_two_minus_one = true;
1706  } else {
1707    return false;
1708  }
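  /*
   * Examples of the decompositions below: lit == 8 becomes a single lsl #3;
   * lit == 10 (two bits set) becomes (src << 3) + (src << 1); and lit == 7
   * (one less than a power of two) becomes (src << 3) - src.
   */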
1709  rl_src = LoadValue(rl_src, kCoreReg);
1710  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1711  if (power_of_two) {
1712    // Shift.
1713    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit));
1714  } else if (pop_count_le2) {
1715    // Shift and add and shift.
1716    int first_bit = LowestSetBit(lit);
1717    int second_bit = LowestSetBit(lit ^ (1 << first_bit));
1718    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
1719  } else {
1720    // Reverse subtract: (src << (shift + 1)) - src.
1721    DCHECK(power_of_two_minus_one);
1722    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
1723    RegStorage t_reg = AllocTemp();
1724    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1));
1725    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
1726  }
1727  StoreValue(rl_dest, rl_result);
1728  return true;
1729}
1730
1731void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
1732                               int lit) {
1733  RegLocation rl_result;
1734  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
1735  bool shift_op = false;
1736  bool is_div = false;
1737
1738  switch (opcode) {
1739    case Instruction::RSUB_INT_LIT8:
1740    case Instruction::RSUB_INT: {
1741      rl_src = LoadValue(rl_src, kCoreReg);
1742      rl_result = EvalLoc(rl_dest, kCoreReg, true);
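      // Reverse subtract computes (lit - src); without an rsb instruction, negate
      // src and then add the literal.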
1743      if (cu_->instruction_set == kThumb2) {
1744        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
1745      } else {
1746        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
1747        OpRegImm(kOpAdd, rl_result.reg, lit);
1748      }
1749      StoreValue(rl_dest, rl_result);
1750      return;
1751    }
1752
1753    case Instruction::SUB_INT:
1754    case Instruction::SUB_INT_2ADDR:
1755      lit = -lit;
1756      // Intended fallthrough
1757    case Instruction::ADD_INT:
1758    case Instruction::ADD_INT_2ADDR:
1759    case Instruction::ADD_INT_LIT8:
1760    case Instruction::ADD_INT_LIT16:
1761      op = kOpAdd;
1762      break;
1763    case Instruction::MUL_INT:
1764    case Instruction::MUL_INT_2ADDR:
1765    case Instruction::MUL_INT_LIT8:
1766    case Instruction::MUL_INT_LIT16: {
1767      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
1768        return;
1769      }
1770      op = kOpMul;
1771      break;
1772    }
1773    case Instruction::AND_INT:
1774    case Instruction::AND_INT_2ADDR:
1775    case Instruction::AND_INT_LIT8:
1776    case Instruction::AND_INT_LIT16:
1777      op = kOpAnd;
1778      break;
1779    case Instruction::OR_INT:
1780    case Instruction::OR_INT_2ADDR:
1781    case Instruction::OR_INT_LIT8:
1782    case Instruction::OR_INT_LIT16:
1783      op = kOpOr;
1784      break;
1785    case Instruction::XOR_INT:
1786    case Instruction::XOR_INT_2ADDR:
1787    case Instruction::XOR_INT_LIT8:
1788    case Instruction::XOR_INT_LIT16:
1789      op = kOpXor;
1790      break;
1791    case Instruction::SHL_INT_LIT8:
1792    case Instruction::SHL_INT:
1793    case Instruction::SHL_INT_2ADDR:
1794      lit &= 31;
1795      shift_op = true;
1796      op = kOpLsl;
1797      break;
1798    case Instruction::SHR_INT_LIT8:
1799    case Instruction::SHR_INT:
1800    case Instruction::SHR_INT_2ADDR:
1801      lit &= 31;
1802      shift_op = true;
1803      op = kOpAsr;
1804      break;
1805    case Instruction::USHR_INT_LIT8:
1806    case Instruction::USHR_INT:
1807    case Instruction::USHR_INT_2ADDR:
1808      lit &= 31;
1809      shift_op = true;
1810      op = kOpLsr;
1811      break;
1812
1813    case Instruction::DIV_INT:
1814    case Instruction::DIV_INT_2ADDR:
1815    case Instruction::DIV_INT_LIT8:
1816    case Instruction::DIV_INT_LIT16:
1817    case Instruction::REM_INT:
1818    case Instruction::REM_INT_2ADDR:
1819    case Instruction::REM_INT_LIT8:
1820    case Instruction::REM_INT_LIT16: {
1821      if (lit == 0) {
1822        GenDivZeroException();
1823        return;
1824      }
1825      if ((opcode == Instruction::DIV_INT) ||
1826          (opcode == Instruction::DIV_INT_2ADDR) ||
1827          (opcode == Instruction::DIV_INT_LIT8) ||
1828          (opcode == Instruction::DIV_INT_LIT16)) {
1829        is_div = true;
1830      } else {
1831        is_div = false;
1832      }
1833      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
1834        return;
1835      }
1836
1837      bool done = false;
1838      if (cu_->instruction_set == kMips) {
1839        rl_src = LoadValue(rl_src, kCoreReg);
1840        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1841        done = true;
1842      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1843        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
1844        done = true;
1845      } else if (cu_->instruction_set == kThumb2) {
1846        if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
1847          // Use ARM SDIV instruction for division.  For remainder we also need to
1848          // calculate using a MUL and subtract.
1849          rl_src = LoadValue(rl_src, kCoreReg);
1850          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
1851          done = true;
1852        }
1853      }
1854
1855      if (!done) {
1856        FlushAllRegs();   /* Everything to home location. */
1857        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
1858        Clobber(TargetReg(kArg0));
1859        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pIdivmod);
1860        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
1861        if (is_div)
1862          rl_result = GetReturn(false);
1863        else
1864          rl_result = GetReturnAlt();
1865      }
1866      StoreValue(rl_dest, rl_result);
1867      return;
1868    }
1869    default:
1870      LOG(FATAL) << "Unexpected opcode " << opcode;
1871  }
1872  rl_src = LoadValue(rl_src, kCoreReg);
1873  rl_result = EvalLoc(rl_dest, kCoreReg, true);
1874  // Avoid shifts by literal 0 - no support in Thumb.  Change to copy.
1875  if (shift_op && (lit == 0)) {
1876    OpRegCopy(rl_result.reg, rl_src.reg);
1877  } else {
1878    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
1879  }
1880  StoreValue(rl_dest, rl_result);
1881}
1882
1883void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
1884                             RegLocation rl_src1, RegLocation rl_src2) {
1885  RegLocation rl_result;
1886  OpKind first_op = kOpBkpt;
1887  OpKind second_op = kOpBkpt;
1888  bool call_out = false;
1889  bool check_zero = false;
1890  ThreadOffset<4> func_offset(-1);
1891  int ret_reg = TargetReg(kRet0).GetReg();
1892
1893  switch (opcode) {
1894    case Instruction::NOT_LONG:
1895      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
1896      rl_result = EvalLoc(rl_dest, kCoreReg, true);
1897      // Check for destructive overlap
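      // If the result's low register aliases the source's high register, complementing
      // the low word first would clobber the source, so stash the high word in a temp.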
1898      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
1899        RegStorage t_reg = AllocTemp();
1900        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
1901        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1902        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
1903        FreeTemp(t_reg);
1904      } else {
1905        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
1906        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
1907      }
1908      StoreValueWide(rl_dest, rl_result);
1909      return;
1910    case Instruction::ADD_LONG:
1911    case Instruction::ADD_LONG_2ADDR:
1912      if (cu_->instruction_set != kThumb2) {
1913        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
1914        return;
1915      }
1916      first_op = kOpAdd;
1917      second_op = kOpAdc;
1918      break;
1919    case Instruction::SUB_LONG:
1920    case Instruction::SUB_LONG_2ADDR:
1921      if (cu_->instruction_set != kThumb2) {
1922        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
1923        return;
1924      }
1925      first_op = kOpSub;
1926      second_op = kOpSbc;
1927      break;
1928    case Instruction::MUL_LONG:
1929    case Instruction::MUL_LONG_2ADDR:
1930      if (cu_->instruction_set != kMips) {
1931        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
1932        return;
1933      } else {
1934        call_out = true;
1935        ret_reg = TargetReg(kRet0).GetReg();
1936        func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
1937      }
1938      break;
1939    case Instruction::DIV_LONG:
1940    case Instruction::DIV_LONG_2ADDR:
1941      call_out = true;
1942      check_zero = true;
1943      ret_reg = TargetReg(kRet0).GetReg();
1944      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLdiv);
1945      break;
1946    case Instruction::REM_LONG:
1947    case Instruction::REM_LONG_2ADDR:
1948      call_out = true;
1949      check_zero = true;
1950      func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmod);
1951      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
1952      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2).GetReg() : TargetReg(kRet0).GetReg();
1953      break;
1954    case Instruction::AND_LONG_2ADDR:
1955    case Instruction::AND_LONG:
1956      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1957        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
1958      }
1959      first_op = kOpAnd;
1960      second_op = kOpAnd;
1961      break;
1962    case Instruction::OR_LONG:
1963    case Instruction::OR_LONG_2ADDR:
1964      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1965        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
1966        return;
1967      }
1968      first_op = kOpOr;
1969      second_op = kOpOr;
1970      break;
1971    case Instruction::XOR_LONG:
1972    case Instruction::XOR_LONG_2ADDR:
1973      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1974        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
1975        return;
1976      }
1977      first_op = kOpXor;
1978      second_op = kOpXor;
1979      break;
1980    case Instruction::NEG_LONG: {
1981      GenNegLong(rl_dest, rl_src2);
1982      return;
1983    }
1984    default:
1985      LOG(FATAL) << "Invalid long arith op: " << opcode;
1986  }
1987  if (!call_out) {
1988    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
1989  } else {
1990    FlushAllRegs();   /* Send everything to home location */
1991    if (check_zero) {
1992      RegStorage r_tmp1 = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
1993      RegStorage r_tmp2 = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
1994      LoadValueDirectWideFixed(rl_src2, r_tmp2);
1995      RegStorage r_tgt = CallHelperSetup(func_offset);
1996      GenDivZeroCheckWide(RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3)));
1997      LoadValueDirectWideFixed(rl_src1, r_tmp1);
1998      // NOTE: callout here is not a safepoint
1999      CallHelper(r_tgt, func_offset, false /* not safepoint */);
2000    } else {
2001      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
2002    }
2003    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
2004    if (ret_reg == TargetReg(kRet0).GetReg())
2005      rl_result = GetReturnWide(false);
2006    else
2007      rl_result = GetReturnWideAlt();
2008    StoreValueWide(rl_dest, rl_result);
2009  }
2010}
2011
2012void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
2013                                RegLocation rl_dest, RegLocation rl_src) {
2014  /*
2015   * Don't optimize the register usage since it calls out to support
2016   * functions
2017   */
2018  FlushAllRegs();   /* Send everything to home location */
2019  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
2020  if (rl_dest.wide) {
2021    RegLocation rl_result;
2022    rl_result = GetReturnWide(rl_dest.fp);
2023    StoreValueWide(rl_dest, rl_result);
2024  } else {
2025    RegLocation rl_result;
2026    rl_result = GetReturn(rl_dest.fp);
2027    StoreValue(rl_dest, rl_result);
2028  }
2029}
2030
2031/* Check if we need to check for pending suspend request */
2032void Mir2Lir::GenSuspendTest(int opt_flags) {
2033  if (Runtime::Current()->ExplicitSuspendChecks()) {
2034    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2035      return;
2036    }
2037    FlushAllRegs();
2038    LIR* branch = OpTestSuspend(NULL);
2039    LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
2040    LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
2041                         current_dalvik_offset_);
2042    branch->target = target;
2043    suspend_launchpads_.Insert(target);
2044  } else {
2045    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2046      return;
2047    }
2048    FlushAllRegs();     // TODO: needed?
2049    LIR* inst = CheckSuspendUsingLoad();
2050    MarkSafepointPC(inst);
2051  }
2052}
2053
2054/* Check if we need to check for pending suspend request */
2055void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
2056  if (Runtime::Current()->ExplicitSuspendChecks()) {
2057    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2058      OpUnconditionalBranch(target);
2059      return;
2060    }
2061    OpTestSuspend(target);
2062    LIR* launch_pad =
2063        RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
2064               current_dalvik_offset_);
2065    FlushAllRegs();
2066    OpUnconditionalBranch(launch_pad);
2067    suspend_launchpads_.Insert(launch_pad);
2068  } else {
2069    // For the implicit suspend check, just perform the trigger
2070    // load and branch to the target.
2071    if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
2072      OpUnconditionalBranch(target);
2073      return;
2074    }
2075    FlushAllRegs();
2076    LIR* inst = CheckSuspendUsingLoad();
2077    MarkSafepointPC(inst);
2078    OpUnconditionalBranch(target);
2079  }
2080}
2081
2082/* Call out to helper assembly routine that will null check obj and then lock it. */
2083void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
2084  FlushAllRegs();
2085  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
2086}
2087
2088/* Call out to helper assembly routine that will null check obj and then unlock it. */
2089void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
2090  FlushAllRegs();
2091  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
2092}
2093
2094/* Generic code for generating a wide constant into a VR. */
2095void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
2096  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
2097  LoadConstantWide(rl_result.reg, value);
2098  StoreValueWide(rl_dest, rl_result);
2099}
2100
2101}  // namespace art
2102