gen_invoke.cc revision f9487c039efb4112616d438593a2ab02792e0304
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "driver/compiler_options.h"
23#include "entrypoints/quick/quick_entrypoints.h"
24#include "invoke_type.h"
25#include "mirror/array.h"
26#include "mirror/string.h"
27#include "mir_to_lir-inl.h"
28#include "x86/codegen_x86.h"
29
30namespace art {
31
32/*
33 * This source file contains "gen" codegen routines that should
34 * be applicable to most targets.  Only mid-level support utilities
35 * and "op" calls may be used here.
36 */
37
38void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
39  class IntrinsicLaunchpadPath : public Mir2Lir::LIRSlowPath {
40   public:
41    IntrinsicLaunchpadPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
42        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
43    }
44
45    void Compile() {
46      m2l_->ResetRegPool();
47      m2l_->ResetDefTracking();
48      LIR* label = GenerateTargetLabel();
49      label->opcode = kPseudoIntrinsicRetry;
50      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
51      m2l_->GenInvokeNoInline(info_);
52      if (cont_ != nullptr) {
53        m2l_->OpUnconditionalBranch(cont_);
54      }
55    }
56
57   private:
58    CallInfo* const info_;
59  };
60
61  AddSlowPath(new (arena_) IntrinsicLaunchpadPath(this, info, branch, resume));
62}
63
64/*
65 * To save scheduling time, helper calls are broken into two parts: generation of
66 * the helper target address, and the actual call to the helper.
67 * These functions can be overridden by architecture specific codegen.
68 */
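/*
 * Typical pattern, as used by the CallRuntimeHelper* wrappers below (an
 * illustrative sketch, not an additional API):
 *
 *   RegStorage r_tgt = CallHelperSetup(helper_offset);  // materialize target early
 *   ...load arguments into kArg0..kArgN / kFArg*...
 *   ClobberCallerSave();
 *   CallHelper(r_tgt, helper_offset, safepoint_pc);     // branch-and-link, mark safepoint
 */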
69RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
70  return LoadHelper(helper_offset);
71}
72
73/* NOTE: if r_tgt is a temp, it will be freed following use */
74LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset, bool safepoint_pc,
75                         bool use_link) {
76  OpKind op = use_link ? kOpBlx : kOpBx;
77  LIR* call_inst = OpReg(op, r_tgt);
78  FreeTemp(r_tgt);
79  if (safepoint_pc) {
80    MarkSafepointPC(call_inst);
81  }
82  return call_inst;
83}
84
85void Mir2Lir::CallRuntimeHelper(ThreadOffset<4> helper_offset, bool safepoint_pc) {
86  RegStorage r_tgt = CallHelperSetup(helper_offset);
87  ClobberCallerSave();
88  CallHelper(r_tgt, helper_offset, safepoint_pc);
89}
90
91void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<4> helper_offset, int arg0, bool safepoint_pc) {
92  RegStorage r_tgt = CallHelperSetup(helper_offset);
93  LoadConstant(TargetReg(kArg0), arg0);
94  ClobberCallerSave();
95  CallHelper(r_tgt, helper_offset, safepoint_pc);
96}
97
98void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<4> helper_offset, RegStorage arg0,
99                                   bool safepoint_pc) {
100  RegStorage r_tgt = CallHelperSetup(helper_offset);
101  OpRegCopy(TargetReg(kArg0), arg0);
102  ClobberCallerSave();
103  CallHelper(r_tgt, helper_offset, safepoint_pc);
104}
105
106void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<4> helper_offset, RegLocation arg0,
107                                           bool safepoint_pc) {
108  RegStorage r_tgt = CallHelperSetup(helper_offset);
109  if (arg0.wide == 0) {
110    LoadValueDirectFixed(arg0, TargetReg(kArg0));
111  } else {
112    RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
113    LoadValueDirectWideFixed(arg0, r_tmp);
114  }
115  ClobberCallerSave();
116  CallHelper(r_tgt, helper_offset, safepoint_pc);
117}
118
119void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<4> helper_offset, int arg0, int arg1,
120                                      bool safepoint_pc) {
121  RegStorage r_tgt = CallHelperSetup(helper_offset);
122  LoadConstant(TargetReg(kArg0), arg0);
123  LoadConstant(TargetReg(kArg1), arg1);
124  ClobberCallerSave();
125  CallHelper(r_tgt, helper_offset, safepoint_pc);
126}
127
128void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<4> helper_offset, int arg0,
129                                              RegLocation arg1, bool safepoint_pc) {
130  RegStorage r_tgt = CallHelperSetup(helper_offset);
131  if (arg1.wide == 0) {
132    LoadValueDirectFixed(arg1, TargetReg(kArg1));
133  } else {
134    RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
135    LoadValueDirectWideFixed(arg1, r_tmp);
136  }
137  LoadConstant(TargetReg(kArg0), arg0);
138  ClobberCallerSave();
139  CallHelper(r_tgt, helper_offset, safepoint_pc);
140}
141
142void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<4> helper_offset, RegLocation arg0,
143                                              int arg1, bool safepoint_pc) {
144  RegStorage r_tgt = CallHelperSetup(helper_offset);
145  LoadValueDirectFixed(arg0, TargetReg(kArg0));
146  LoadConstant(TargetReg(kArg1), arg1);
147  ClobberCallerSave();
148  CallHelper(r_tgt, helper_offset, safepoint_pc);
149}
150
151void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<4> helper_offset, int arg0, RegStorage arg1,
152                                      bool safepoint_pc) {
153  RegStorage r_tgt = CallHelperSetup(helper_offset);
154  OpRegCopy(TargetReg(kArg1), arg1);
155  LoadConstant(TargetReg(kArg0), arg0);
156  ClobberCallerSave();
157  CallHelper(r_tgt, helper_offset, safepoint_pc);
158}
159
160void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<4> helper_offset, RegStorage arg0, int arg1,
161                                      bool safepoint_pc) {
162  RegStorage r_tgt = CallHelperSetup(helper_offset);
163  OpRegCopy(TargetReg(kArg0), arg0);
164  LoadConstant(TargetReg(kArg1), arg1);
165  ClobberCallerSave();
166  CallHelper(r_tgt, helper_offset, safepoint_pc);
167}
168
169void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<4> helper_offset, int arg0,
170                                         bool safepoint_pc) {
171  RegStorage r_tgt = CallHelperSetup(helper_offset);
172  LoadCurrMethodDirect(TargetReg(kArg1));
173  LoadConstant(TargetReg(kArg0), arg0);
174  ClobberCallerSave();
175  CallHelper(r_tgt, helper_offset, safepoint_pc);
176}
177
178void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<4> helper_offset, RegStorage arg0,
179                                         bool safepoint_pc) {
180  RegStorage r_tgt = CallHelperSetup(helper_offset);
181  DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
182  if (TargetReg(kArg0) != arg0) {
183    OpRegCopy(TargetReg(kArg0), arg0);
184  }
185  LoadCurrMethodDirect(TargetReg(kArg1));
186  ClobberCallerSave();
187  CallHelper(r_tgt, helper_offset, safepoint_pc);
188}
189
190void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<4> helper_offset, RegStorage arg0,
191                                                    RegLocation arg2, bool safepoint_pc) {
192  RegStorage r_tgt = CallHelperSetup(helper_offset);
193  DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
194  if (TargetReg(kArg0) != arg0) {
195    OpRegCopy(TargetReg(kArg0), arg0);
196  }
197  LoadCurrMethodDirect(TargetReg(kArg1));
198  LoadValueDirectFixed(arg2, TargetReg(kArg2));
199  ClobberCallerSave();
200  CallHelper(r_tgt, helper_offset, safepoint_pc);
201}
202
203void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<4> helper_offset,
204                                                      RegLocation arg0, RegLocation arg1,
205                                                      bool safepoint_pc) {
206  RegStorage r_tgt = CallHelperSetup(helper_offset);
207  if (arg0.wide == 0) {
208    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
209    if (arg1.wide == 0) {
210      if (cu_->instruction_set == kMips) {
211        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
212      } else {
213        LoadValueDirectFixed(arg1, TargetReg(kArg1));
214      }
215    } else {
216      if (cu_->instruction_set == kMips) {
217        RegStorage r_tmp;
218        if (arg1.fp) {
219          r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
220        } else {
221          r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
222        }
223        LoadValueDirectWideFixed(arg1, r_tmp);
224      } else {
225        RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
226        LoadValueDirectWideFixed(arg1, r_tmp);
227      }
228    }
229  } else {
230    RegStorage r_tmp;
231    if (arg0.fp) {
232      r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1));
233    } else {
234      r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
235    }
236    LoadValueDirectWideFixed(arg0, r_tmp);
237    if (arg1.wide == 0) {
238      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
239    } else {
240      RegStorage r_tmp;
241      if (arg1.fp) {
242        r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
243      } else {
244        r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
245      }
246      LoadValueDirectWideFixed(arg1, r_tmp);
247    }
248  }
249  ClobberCallerSave();
250  CallHelper(r_tgt, helper_offset, safepoint_pc);
251}
252
253void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<4> helper_offset, RegStorage arg0,
254                                      RegStorage arg1, bool safepoint_pc) {
255  RegStorage r_tgt = CallHelperSetup(helper_offset);
256  DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg());  // check copy into arg0 won't clobber arg1
257  OpRegCopy(TargetReg(kArg0), arg0);
258  OpRegCopy(TargetReg(kArg1), arg1);
259  ClobberCallerSave();
260  CallHelper(r_tgt, helper_offset, safepoint_pc);
261}
262
263void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<4> helper_offset, RegStorage arg0,
264                                         RegStorage arg1, int arg2, bool safepoint_pc) {
265  RegStorage r_tgt = CallHelperSetup(helper_offset);
266  DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg());  // check copy into arg0 won't clobber arg1
267  OpRegCopy(TargetReg(kArg0), arg0);
268  OpRegCopy(TargetReg(kArg1), arg1);
269  LoadConstant(TargetReg(kArg2), arg2);
270  ClobberCallerSave();
271  CallHelper(r_tgt, helper_offset, safepoint_pc);
272}
273
274void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<4> helper_offset,
275                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
276  RegStorage r_tgt = CallHelperSetup(helper_offset);
277  LoadValueDirectFixed(arg2, TargetReg(kArg2));
278  LoadCurrMethodDirect(TargetReg(kArg1));
279  LoadConstant(TargetReg(kArg0), arg0);
280  ClobberCallerSave();
281  CallHelper(r_tgt, helper_offset, safepoint_pc);
282}
283
284void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<4> helper_offset, int arg0,
285                                            int arg2, bool safepoint_pc) {
286  RegStorage r_tgt = CallHelperSetup(helper_offset);
287  LoadCurrMethodDirect(TargetReg(kArg1));
288  LoadConstant(TargetReg(kArg2), arg2);
289  LoadConstant(TargetReg(kArg0), arg0);
290  ClobberCallerSave();
291  CallHelper(r_tgt, helper_offset, safepoint_pc);
292}
293
294void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<4> helper_offset,
295                                                         int arg0, RegLocation arg1,
296                                                         RegLocation arg2, bool safepoint_pc) {
297  RegStorage r_tgt = CallHelperSetup(helper_offset);
298  DCHECK_EQ(arg1.wide, 0U);
299  LoadValueDirectFixed(arg1, TargetReg(kArg1));
300  if (arg2.wide == 0) {
301    LoadValueDirectFixed(arg2, TargetReg(kArg2));
302  } else {
303    RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
304    LoadValueDirectWideFixed(arg2, r_tmp);
305  }
306  LoadConstant(TargetReg(kArg0), arg0);
307  ClobberCallerSave();
308  CallHelper(r_tgt, helper_offset, safepoint_pc);
309}
310
311void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<4> helper_offset,
312                                                                 RegLocation arg0, RegLocation arg1,
313                                                                 RegLocation arg2,
314                                                                 bool safepoint_pc) {
315  RegStorage r_tgt = CallHelperSetup(helper_offset);
316  DCHECK_EQ(arg0.wide, 0U);
317  LoadValueDirectFixed(arg0, TargetReg(kArg0));
318  DCHECK_EQ(arg1.wide, 0U);
319  LoadValueDirectFixed(arg1, TargetReg(kArg1));
320  DCHECK_EQ(arg2.wide, 0U);
321  LoadValueDirectFixed(arg2, TargetReg(kArg2));
322  ClobberCallerSave();
323  CallHelper(r_tgt, helper_offset, safepoint_pc);
324}
325
326/*
327 * If there are any ins passed in registers that have not been promoted
328 * to a callee-save register, flush them to the frame.  Perform initial
329 * assignment of promoted arguments.
330 *
331 * ArgLocs is an array of location records describing the incoming arguments
332 * with one location record per word of argument.
333 */
334void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
335  /*
336   * Dummy up a RegLocation for the incoming Method*
337   * It will attempt to keep kArg0 live (or copy it to home location
338   * if promoted).
339   */
340  RegLocation rl_src = rl_method;
341  rl_src.location = kLocPhysReg;
342  rl_src.reg = TargetReg(kArg0);
343  rl_src.home = false;
344  MarkLive(rl_src.reg, rl_src.s_reg_low);
345  StoreValue(rl_method, rl_src);
346  // If Method* has been promoted, explicitly flush
347  if (rl_method.location == kLocPhysReg) {
348    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
349  }
350
351  if (cu_->num_ins == 0) {
352    return;
353  }
354
355  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
356  /*
357   * Copy incoming arguments to their proper home locations.
358   * NOTE: an older version of dx had an issue in which
359   * it would reuse static method argument registers.
360   * This could result in the same Dalvik virtual register
361   * being promoted to both core and fp regs. To account for this,
362   * we only copy to the corresponding promoted physical register
363   * if it matches the type of the SSA name for the incoming
364   * argument.  It is also possible that long and double arguments
365   * end up half-promoted.  In those cases, we must flush the promoted
366   * half to memory as well.
367   */
368  for (int i = 0; i < cu_->num_ins; i++) {
369    PromotionMap* v_map = &promotion_map_[start_vreg + i];
370    RegStorage reg = GetArgMappingToPhysicalReg(i);
371
372    if (reg.Valid()) {
373      // If arriving in register
374      bool need_flush = true;
375      RegLocation* t_loc = &ArgLocs[i];
376      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
377        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
378        need_flush = false;
379      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
380        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
381        need_flush = false;
382      } else {
383        need_flush = true;
384      }
385
386      // For wide args, force flush if not fully promoted
387      if (t_loc->wide) {
388        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
389        // Is only half promoted?
390        need_flush |= (p_map->core_location != v_map->core_location) ||
391            (p_map->fp_location != v_map->fp_location);
392        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
393          /*
394           * In Arm, a double is represented as a pair of consecutive single float
395           * registers starting at an even number.  It's possible that both Dalvik vRegs
396           * representing the incoming double were independently promoted as singles - but
397           * not in a form usable as a double.  If so, we need to flush - even though the
398           * incoming arg appears fully in register.  At this point in the code, both
399           * halves of the double are promoted.  Make sure they are in a usable form.
400           */
401          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
402          int low_reg = promotion_map_[lowreg_index].FpReg;
403          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
404          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
405            need_flush = true;
406          }
407        }
408      }
409      if (need_flush) {
410        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord);
411      }
412    } else {
413      // If arriving in frame & promoted
414      if (v_map->core_location == kLocPhysReg) {
415        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
416                     RegStorage::Solo32(v_map->core_reg));
417      }
418      if (v_map->fp_location == kLocPhysReg) {
419        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
420      }
421    }
422  }
423}
424
425/*
426 * Bit of a hack here - in the absence of a real scheduling pass,
427 * emit the next instruction in static & direct invoke sequences.
428 */
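/*
 * Sketch of the resolved (non-direct) path below, one step emitted per call:
 *   state 0: kArg0      <- current Method*
 *   state 1: kArg0      <- kArg0->dex_cache_resolved_methods_
 *   state 2: kArg0      <- resolved_methods[target_method.dex_method_index]
 *   state 3: kInvokeTgt <- kArg0->entry_point_from_quick_compiled_code_ (non-x86 only)
 * When direct_code/direct_method are known, state 0 instead loads them as constants
 * (or via LoadCodeAddress()/LoadMethodAddress()) and the remaining states are skipped.
 */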
429static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
430                          int state, const MethodReference& target_method,
431                          uint32_t unused,
432                          uintptr_t direct_code, uintptr_t direct_method,
433                          InvokeType type) {
434  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
435  if (direct_code != 0 && direct_method != 0) {
436    switch (state) {
437    case 0:  // Get the current Method* [sets kArg0]
438      if (direct_code != static_cast<unsigned int>(-1)) {
439        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
440          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
441        }
442      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
443        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
444      }
445      if (direct_method != static_cast<unsigned int>(-1)) {
446        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
447      } else {
448        cg->LoadMethodAddress(target_method, type, kArg0);
449      }
450      break;
451    default:
452      return -1;
453    }
454  } else {
455    switch (state) {
456    case 0:  // Get the current Method* [sets kArg0]
457      // TUNING: we can save a reg copy if Method* has been promoted.
458      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
459      break;
460    case 1:  // Get method->dex_cache_resolved_methods_
461      cg->LoadWordDisp(cg->TargetReg(kArg0),
462                       mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
463                       cg->TargetReg(kArg0));
464      // Set up direct code if known.
465      if (direct_code != 0) {
466        if (direct_code != static_cast<unsigned int>(-1)) {
467          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
468        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
469          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
470          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
471        }
472      }
473      break;
474    case 2:  // Grab target method*
475      CHECK_EQ(cu->dex_file, target_method.dex_file);
476      cg->LoadWordDisp(cg->TargetReg(kArg0),
477                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
478                       (target_method.dex_method_index * 4), cg->TargetReg(kArg0));
479      break;
480    case 3:  // Grab the code from the method*
481      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
482        if (direct_code == 0) {
483          cg->LoadWordDisp(cg->TargetReg(kArg0),
484                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
485                           cg->TargetReg(kInvokeTgt));
486        }
487        break;
488      }
489      // Intentional fallthrough for x86
490    default:
491      return -1;
492    }
493  }
494  return state + 1;
495}
496
497/*
498 * Bit of a hack here - in the absence of a real scheduling pass,
499 * emit the next instruction in a virtual invoke sequence.
500 * We can use kLr as a temp prior to target address loading.
501 * Note also that we'll load the first argument ("this") into
502 * kArg1 here rather than the standard LoadArgRegs.
503 */
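/*
 * Sketch of the vtable dispatch emitted below, one step per call:
 *   state 0: kArg1      <- "this"
 *   state 1: null-check kArg1; kInvokeTgt <- this->klass_
 *   state 2: kInvokeTgt <- klass_->vtable_
 *   state 3: kArg0      <- vtable_[method_idx]
 *   state 4: kInvokeTgt <- kArg0->entry_point_from_quick_compiled_code_ (non-x86 only)
 */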
504static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
505                         int state, const MethodReference& target_method,
506                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
507                         InvokeType unused3) {
508  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
509  /*
510   * This is the fast path in which the target virtual method is
511   * fully resolved at compile time.
512   */
513  switch (state) {
514    case 0: {  // Get "this" [set kArg1]
515      RegLocation  rl_arg = info->args[0];
516      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
517      break;
518    }
519    case 1:  // Is "this" null? [use kArg1]
520      cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
521      // get this->klass_ [use kArg1, set kInvokeTgt]
522      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
523                       cg->TargetReg(kInvokeTgt));
524      cg->MarkPossibleNullPointerException(info->opt_flags);
525      break;
526    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
527      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
528                       cg->TargetReg(kInvokeTgt));
529      break;
530    case 3:  // Get target method [use kInvokeTgt, set kArg0]
531      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
532                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
533                       cg->TargetReg(kArg0));
534      break;
535    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
536      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
537        cg->LoadWordDisp(cg->TargetReg(kArg0),
538                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
539                         cg->TargetReg(kInvokeTgt));
540        break;
541      }
542      // Intentional fallthrough for X86
543    default:
544      return -1;
545  }
546  return state + 1;
547}
548
549/*
550 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
551 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
552 * more than one interface method map to the same index. Note also that we'll load the first
553 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
554 */
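/*
 * Sketch of the IMT dispatch emitted below, one step per call:
 *   state 0: kHiddenArg <- target dex_method_index (for conflict resolution)
 *   state 1: kArg1      <- "this"
 *   state 2: null-check kArg1; kInvokeTgt <- this->klass_
 *   state 3: kInvokeTgt <- klass_->imtable_
 *   state 4: kArg0      <- imtable_[method_idx % kImtSize]
 *   state 5: kInvokeTgt <- kArg0->entry_point_from_quick_compiled_code_ (non-x86 only)
 */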
555static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
556                                 const MethodReference& target_method,
557                                 uint32_t method_idx, uintptr_t unused,
558                                 uintptr_t direct_method, InvokeType unused2) {
559  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
560
561  switch (state) {
562    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
563      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
564      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
565      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
566        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
567      }
568      break;
569    case 1: {  // Get "this" [set kArg1]
570      RegLocation  rl_arg = info->args[0];
571      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
572      break;
573    }
574    case 2:  // Is "this" null? [use kArg1]
575      cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
576      // Get this->klass_ [use kArg1, set kInvokeTgt]
577      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
578                       cg->TargetReg(kInvokeTgt));
579      cg->MarkPossibleNullPointerException(info->opt_flags);
580      break;
581    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
582      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
583                       cg->TargetReg(kInvokeTgt));
584      break;
585    case 4:  // Get target method [use kInvokeTgt, set kArg0]
586      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
587                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
588                       cg->TargetReg(kArg0));
589      break;
590    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
591      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
592        cg->LoadWordDisp(cg->TargetReg(kArg0),
593                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
594                         cg->TargetReg(kInvokeTgt));
595        break;
596      }
597      // Intentional fallthrough for X86
598    default:
599      return -1;
600  }
601  return state + 1;
602}
603
604static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<4> trampoline,
605                            int state, const MethodReference& target_method,
606                            uint32_t method_idx) {
607  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
608  /*
609   * This handles the case in which the base method is not fully
610   * resolved at compile time; we bail to a runtime helper.
611   */
612  if (state == 0) {
613    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
614      // Load trampoline target
615      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
616    }
617    // Load kArg0 with method index
618    CHECK_EQ(cu->dex_file, target_method.dex_file);
619    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
620    return 1;
621  }
622  return -1;
623}
624
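// The NextInvokeInsnSP variants below differ only in which access-check trampoline
// entrypoint they pass; the argument setup is shared.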
625static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
626                                int state,
627                                const MethodReference& target_method,
628                                uint32_t unused, uintptr_t unused2,
629                                uintptr_t unused3, InvokeType unused4) {
630  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
631  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
632}
633
634static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
635                                const MethodReference& target_method,
636                                uint32_t unused, uintptr_t unused2,
637                                uintptr_t unused3, InvokeType unused4) {
638  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
639  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
640}
641
642static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
643                               const MethodReference& target_method,
644                               uint32_t unused, uintptr_t unused2,
645                               uintptr_t unused3, InvokeType unused4) {
646  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
647  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
648}
649
650static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
651                           const MethodReference& target_method,
652                           uint32_t unused, uintptr_t unused2,
653                           uintptr_t unused3, InvokeType unused4) {
654  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck);
655  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
656}
657
658static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
659                                                CallInfo* info, int state,
660                                                const MethodReference& target_method,
661                                                uint32_t unused, uintptr_t unused2,
662                                                uintptr_t unused3, InvokeType unused4) {
663  ThreadOffset<4> trampoline =
664      QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck);
665  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
666}
667
668int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
669                         NextCallInsn next_call_insn,
670                         const MethodReference& target_method,
671                         uint32_t vtable_idx, uintptr_t direct_code,
672                         uintptr_t direct_method, InvokeType type, bool skip_this) {
673  int last_arg_reg = TargetReg(kArg3).GetReg();
674  int next_reg = TargetReg(kArg1).GetReg();
675  int next_arg = 0;
676  if (skip_this) {
677    next_reg++;
678    next_arg++;
679  }
680  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
681    RegLocation rl_arg = info->args[next_arg++];
682    rl_arg = UpdateRawLoc(rl_arg);
683    if (rl_arg.wide && (next_reg <= TargetReg(kArg2).GetReg())) {
684      RegStorage r_tmp(RegStorage::k64BitPair, next_reg, next_reg + 1);
685      LoadValueDirectWideFixed(rl_arg, r_tmp);
686      next_reg++;
687      next_arg++;
688    } else {
689      if (rl_arg.wide) {
690        rl_arg = NarrowRegLoc(rl_arg);
691        rl_arg.is_const = false;
692      }
693      LoadValueDirectFixed(rl_arg, RegStorage::Solo32(next_reg));
694    }
695    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
696                                direct_code, direct_method, type);
697  }
698  return call_state;
699}
700
701/*
702 * Load up to 5 arguments, the first three of which will be in
703 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
704 * and as part of the load sequence, it must be replaced with
705 * the target method pointer.  Note, this may also be called
706 * for "range" variants if the number of arguments is 5 or fewer.
707 */
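/*
 * Illustrative layout for a 5-argument non-range invoke (skip_this == false):
 * args[0..2] end up in kArg1..kArg3 via LoadArgRegs(), while args[3..4] are
 * stored to the outs area at (arg_index + 1) * 4 bytes above kSp by the loop below.
 */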
708int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
709                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
710                                  const MethodReference& target_method,
711                                  uint32_t vtable_idx, uintptr_t direct_code,
712                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
713  RegLocation rl_arg;
714
715  /* If no arguments, just return */
716  if (info->num_arg_words == 0)
717    return call_state;
718
719  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
720                              direct_code, direct_method, type);
721
722  DCHECK_LE(info->num_arg_words, 5);
723  if (info->num_arg_words > 3) {
724    int32_t next_use = 3;
725    // Detect special case of wide arg spanning arg3/arg4
726    RegLocation rl_use0 = info->args[0];
727    RegLocation rl_use1 = info->args[1];
728    RegLocation rl_use2 = info->args[2];
729    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
730      RegStorage reg;
731      // Wide spans, we need the 2nd half of uses[2].
732      rl_arg = UpdateLocWide(rl_use2);
733      if (rl_arg.location == kLocPhysReg) {
734        reg = rl_arg.reg.GetHigh();
735      } else {
736        // kArg2 & kArg3 can safely be used here
737        reg = TargetReg(kArg3);
738        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
739        call_state = next_call_insn(cu_, info, call_state, target_method,
740                                    vtable_idx, direct_code, direct_method, type);
741      }
742      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
743      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
744                                  direct_code, direct_method, type);
745      next_use++;
746    }
747    // Loop through the rest
748    while (next_use < info->num_arg_words) {
749      RegStorage low_reg;
750      RegStorage high_reg;
751      rl_arg = info->args[next_use];
752      rl_arg = UpdateRawLoc(rl_arg);
753      if (rl_arg.location == kLocPhysReg) {
754        if (rl_arg.wide) {
755          low_reg = rl_arg.reg.GetLow();
756          high_reg = rl_arg.reg.GetHigh();
757        } else {
758          low_reg = rl_arg.reg;
759        }
760      } else {
761        low_reg = TargetReg(kArg2);
762        if (rl_arg.wide) {
763          high_reg = TargetReg(kArg3);
764          LoadValueDirectWideFixed(rl_arg, RegStorage::MakeRegPair(low_reg, high_reg));
765        } else {
766          LoadValueDirectFixed(rl_arg, low_reg);
767        }
768        call_state = next_call_insn(cu_, info, call_state, target_method,
769                                    vtable_idx, direct_code, direct_method, type);
770      }
771      int outs_offset = (next_use + 1) * 4;
772      if (rl_arg.wide) {
773        StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg));
774        next_use += 2;
775      } else {
776        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
777        next_use++;
778      }
779      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
780                               direct_code, direct_method, type);
781    }
782  }
783
784  call_state = LoadArgRegs(info, call_state, next_call_insn,
785                           target_method, vtable_idx, direct_code, direct_method,
786                           type, skip_this);
787
788  if (pcrLabel) {
789    if (Runtime::Current()->ExplicitNullChecks()) {
790      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
791    } else {
792      *pcrLabel = nullptr;
793      // In lieu of generating a check for kArg1 being null, we need to
794      // perform a load when doing implicit checks.
795      RegStorage tmp = AllocTemp();
796      LoadWordDisp(TargetReg(kArg1), 0, tmp);
797      MarkPossibleNullPointerException(info->opt_flags);
798      FreeTemp(tmp);
799    }
800  }
801  return call_state;
802}
803
804/*
805 * May have 0+ arguments (also used for jumbo).  Note that
806 * source virtual registers may be in physical registers, so may
807 * need to be flushed to home location before copying.  This
808 * applies to arg3 and above (see below).
809 *
810 * Two general strategies:
811 *    If < 20 arguments
812 *       Pass args 3-18 using vldm/vstm block copy
813 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
814 *    If 20+ arguments
815 *       Pass args arg19+ using memcpy block copy
816 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
817 *
818 */
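/*
 * For example, with the Method* at [kSp + 0] and the first three argument words
 * passed in registers, the block copy below starts writing outs at
 * outs_offset = 4 + 3 * 4 = 16 bytes above kSp, reading from the first
 * stack-passed argument's home location (SRegOffset(info->args[3].s_reg_low)).
 */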
819int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
820                                LIR** pcrLabel, NextCallInsn next_call_insn,
821                                const MethodReference& target_method,
822                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
823                                InvokeType type, bool skip_this) {
824  // If we can treat it as non-range (Jumbo ops will use range form)
825  if (info->num_arg_words <= 5)
826    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
827                                next_call_insn, target_method, vtable_idx,
828                                direct_code, direct_method, type, skip_this);
829  /*
830   * First load the non-register arguments.  Both forms expect all
831   * of the source arguments to be in their home frame location, so
832   * scan the s_reg names and flush any that have been promoted to
833   * frame backing storage.
834   */
835  // Scan the rest of the args - if in phys_reg flush to memory
836  for (int next_arg = 0; next_arg < info->num_arg_words;) {
837    RegLocation loc = info->args[next_arg];
838    if (loc.wide) {
839      loc = UpdateLocWide(loc);
840      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
841        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
842      }
843      next_arg += 2;
844    } else {
845      loc = UpdateLoc(loc);
846      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
847        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
848      }
849      next_arg++;
850    }
851  }
852
853  // Logic below assumes that Method pointer is at offset zero from SP.
854  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
855
856  // The first 3 arguments are passed via registers.
857  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
858  // get size of uintptr_t or size of object reference according to model being used.
859  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
860  int start_offset = SRegOffset(info->args[3].s_reg_low);
861  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
862  DCHECK_GT(regs_left_to_pass_via_stack, 0);
863
864  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
865    // Use vldm/vstm pair using kArg3 as a temp
866    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
867                             direct_code, direct_method, type);
868    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
869    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
870    // TUNING: loosen barrier
871    ld->u.m.def_mask = ENCODE_ALL;
872    SetMemRefType(ld, true /* is_load */, kDalvikReg);
873    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
874                             direct_code, direct_method, type);
875    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
876    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
877                             direct_code, direct_method, type);
878    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
879    SetMemRefType(st, false /* is_load */, kDalvikReg);
880    st->u.m.def_mask = ENCODE_ALL;
881    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
882                             direct_code, direct_method, type);
883  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
884    int current_src_offset = start_offset;
885    int current_dest_offset = outs_offset;
886
887    while (regs_left_to_pass_via_stack > 0) {
888      // This is based on the knowledge that the stack itself is 16-byte aligned.
889      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
890      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
891      size_t bytes_to_move;
892
893      /*
894       * The amount to move defaults to 32-bit. If there are 4 registers left to move, do a
895       * 128-bit move now, because we won't get another chance to align. If there are more
896       * than 4 registers left to move, consider a 128-bit move only if either src or dest is
897       * already aligned; otherwise we could potentially do a smaller move first to reach alignment.
898       */
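      // Illustrative: with 6 words left and a 16-byte-aligned source, the first
      // iteration moves 16 bytes via an xmm temp, and the remaining 2 words go
      // through the 32-bit GPR path below.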
899      if (regs_left_to_pass_via_stack == 4 ||
900          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
901        // Moving 128-bits via xmm register.
902        bytes_to_move = sizeof(uint32_t) * 4;
903
904        // Allocate a free xmm temp. Since we are working through the calling sequence,
905        // we expect to have an xmm temporary available.
906        RegStorage temp = AllocTempDouble();
907        CHECK_GT(temp.GetLowReg(), 0);
908
909        LIR* ld1 = nullptr;
910        LIR* ld2 = nullptr;
911        LIR* st1 = nullptr;
912        LIR* st2 = nullptr;
913
914        /*
915         * The logic is similar for both loads and stores. If we have 16-byte alignment,
916         * do an aligned move. If we have 8-byte alignment, then do the move in two
917         * parts. This approach prevents possible cache line splits. Finally, fall back
918         * to doing an unaligned move. In most cases we likely won't split the cache
919         * line but we cannot prove it and thus take a conservative approach.
920         */
921        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
922        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
923
924        if (src_is_16b_aligned) {
925          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
926        } else if (src_is_8b_aligned) {
927          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
928          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
929                            kMovHi128FP);
930        } else {
931          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
932        }
933
934        if (dest_is_16b_aligned) {
935          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
936        } else if (dest_is_8b_aligned) {
937          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
938          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
939                            temp, kMovHi128FP);
940        } else {
941          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
942        }
943
944        // TODO If we could keep track of aliasing information for memory accesses that are wider
945        // than 64-bit, we wouldn't need to set up a barrier.
946        if (ld1 != nullptr) {
947          if (ld2 != nullptr) {
948            // For 64-bit load we can actually set up the aliasing information.
949            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
950            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
951          } else {
952            // Set barrier for 128-bit load.
953            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
954            ld1->u.m.def_mask = ENCODE_ALL;
955          }
956        }
957        if (st1 != nullptr) {
958          if (st2 != nullptr) {
959            // For 64-bit store we can actually set up the aliasing information.
960            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
961            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
962          } else {
963            // Set barrier for 128-bit store.
964            SetMemRefType(st1, false /* is_load */, kDalvikReg);
965            st1->u.m.def_mask = ENCODE_ALL;
966          }
967        }
968
969        // Free the temporary used for the data movement.
970        // CLEANUP: temp is currently a bogus pair, eliminate the extra free when updated.
971        FreeTemp(temp.GetLow());
972        FreeTemp(temp.GetHigh());
973      } else {
974        // Moving 32-bits via general purpose register.
975        bytes_to_move = sizeof(uint32_t);
976
977        // Instead of allocating a new temp, simply reuse one of the registers being used
978        // for argument passing.
979        RegStorage temp = TargetReg(kArg3);
980
981        // Now load the argument VR and store to the outs.
982        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
983        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
984      }
985
986      current_src_offset += bytes_to_move;
987      current_dest_offset += bytes_to_move;
988      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
989    }
990  } else {
991    // Generate memcpy
992    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
993    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
994    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0),
995                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
996  }
997
998  call_state = LoadArgRegs(info, call_state, next_call_insn,
999                           target_method, vtable_idx, direct_code, direct_method,
1000                           type, skip_this);
1001
1002  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1003                           direct_code, direct_method, type);
1004  if (pcrLabel) {
1005    if (Runtime::Current()->ExplicitNullChecks()) {
1006      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
1007    } else {
1008      *pcrLabel = nullptr;
1009      // In lieu of generating a check for kArg1 being null, we need to
1010      // perform a load when doing implicit checks.
1011      RegStorage tmp = AllocTemp();
1012      LoadWordDisp(TargetReg(kArg1), 0, tmp);
1013      MarkPossibleNullPointerException(info->opt_flags);
1014      FreeTemp(tmp);
1015    }
1016  }
1017  return call_state;
1018}
1019
1020RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
1021  RegLocation res;
1022  if (info->result.location == kLocInvalid) {
1023    res = GetReturn(false);
1024  } else {
1025    res = info->result;
1026  }
1027  return res;
1028}
1029
1030RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
1031  RegLocation res;
1032  if (info->result.location == kLocInvalid) {
1033    res = GetReturnWide(false);
1034  } else {
1035    res = info->result;
1036  }
1037  return res;
1038}
1039
1040bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
1041  if (cu_->instruction_set == kMips) {
1042    // TODO - add Mips implementation
1043    return false;
1044  }
1045  // Location of reference to data array
1046  int value_offset = mirror::String::ValueOffset().Int32Value();
1047  // Location of count
1048  int count_offset = mirror::String::CountOffset().Int32Value();
1049  // Starting offset within data array
1050  int offset_offset = mirror::String::OffsetOffset().Int32Value();
1051  // Start of char data within array_
1052  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
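  // Illustrative address computation done below: the char at index i lives at
  // value_ + data_offset + 2 * (offset_ + i); reg_ptr/reg_off are built to match.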
1053
1054  RegLocation rl_obj = info->args[0];
1055  RegLocation rl_idx = info->args[1];
1056  rl_obj = LoadValue(rl_obj, kCoreReg);
1057  // X86 wants to avoid putting a constant index into a register.
1058  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
1059    rl_idx = LoadValue(rl_idx, kCoreReg);
1060  }
1061  RegStorage reg_max;
1062  GenNullCheck(rl_obj.reg, info->opt_flags);
1063  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1064  LIR* range_check_branch = nullptr;
1065  RegStorage reg_off;
1066  RegStorage reg_ptr;
1067  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1068    reg_off = AllocTemp();
1069    reg_ptr = AllocTemp();
1070    if (range_check) {
1071      reg_max = AllocTemp();
1072      LoadWordDisp(rl_obj.reg, count_offset, reg_max);
1073      MarkPossibleNullPointerException(info->opt_flags);
1074    }
1075    LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
1076    MarkPossibleNullPointerException(info->opt_flags);
1077    LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
1078    if (range_check) {
1079      // Set up a launch pad to allow retry in case of bounds violation.
1080      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1081      FreeTemp(reg_max);
1082      range_check_branch = OpCondBranch(kCondUge, nullptr);
1083    }
1084    OpRegImm(kOpAdd, reg_ptr, data_offset);
1085  } else {
1086    if (range_check) {
1087      // On x86, we can compare to memory directly
1088      // Set up a launch pad to allow retry in case of bounds violation.
1089      if (rl_idx.is_const) {
1090        range_check_branch = OpCmpMemImmBranch(
1091            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
1092            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
1093      } else {
1094        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
1095        range_check_branch = OpCondBranch(kCondUge, nullptr);
1096      }
1097    }
1098    reg_off = AllocTemp();
1099    reg_ptr = AllocTemp();
1100    LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
1101    LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
1102  }
1103  if (rl_idx.is_const) {
1104    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1105  } else {
1106    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1107  }
1108  FreeTemp(rl_obj.reg);
1109  if (rl_idx.location == kLocPhysReg) {
1110    FreeTemp(rl_idx.reg);
1111  }
1112  RegLocation rl_dest = InlineTarget(info);
1113  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1114  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1115    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1116  } else {
1117    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
1118                        RegStorage::InvalidReg(), kUnsignedHalf, INVALID_SREG);
1119  }
1120  FreeTemp(reg_off);
1121  FreeTemp(reg_ptr);
1122  StoreValue(rl_dest, rl_result);
1123  if (range_check) {
1124    DCHECK(range_check_branch != nullptr);
1125    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1126    AddIntrinsicLaunchpad(info, range_check_branch);
1127  }
1128  return true;
1129}
1130
1131// Generates an inlined String.isEmpty() or String.length().
1132bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1133  if (cu_->instruction_set == kMips) {
1134    // TODO - add Mips implementation
1135    return false;
1136  }
1137  // dst = src.length();
1138  RegLocation rl_obj = info->args[0];
1139  rl_obj = LoadValue(rl_obj, kCoreReg);
1140  RegLocation rl_dest = InlineTarget(info);
1141  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1142  GenNullCheck(rl_obj.reg, info->opt_flags);
1143  LoadWordDisp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1144  MarkPossibleNullPointerException(info->opt_flags);
1145  if (is_empty) {
1146    // dst = (dst == 0);
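    // Branch-free: on Thumb2, negating the length leaves the carry flag set only when
    // it was zero, so the adc yields the boolean; on x86, (length - 1) >> 31 (logical)
    // is 1 only when length == 0, since a length is never negative.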
1147    if (cu_->instruction_set == kThumb2) {
1148      RegStorage t_reg = AllocTemp();
1149      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1150      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1151    } else {
1152      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1153      OpRegImm(kOpSub, rl_result.reg, 1);
1154      OpRegImm(kOpLsr, rl_result.reg, 31);
1155    }
1156  }
1157  StoreValue(rl_dest, rl_result);
1158  return true;
1159}
1160
1161bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1162  if (cu_->instruction_set == kMips) {
1163    // TODO - add Mips implementation
1164    return false;
1165  }
1166  RegLocation rl_src_i = info->args[0];
1167  RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1168  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1169  if (size == kLong) {
1170    RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
1171    RegStorage r_i_low = rl_i.reg.GetLow();
1172    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1173      // The first REV will clobber rl_result.reg.GetReg(); save the value in a temp for the second REV.
1174      r_i_low = AllocTemp();
1175      OpRegCopy(r_i_low, rl_i.reg);
1176    }
1177    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1178    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1179    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1180      FreeTemp(r_i_low);
1181    }
1182    StoreValueWide(rl_dest, rl_result);
1183  } else {
1184    DCHECK(size == kWord || size == kSignedHalf);
1185    OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
1186    RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
1187    OpRegReg(op, rl_result.reg, rl_i.reg);
1188    StoreValue(rl_dest, rl_result);
1189  }
1190  return true;
1191}
1192
1193bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1194  if (cu_->instruction_set == kMips) {
1195    // TODO - add Mips implementation
1196    return false;
1197  }
1198  RegLocation rl_src = info->args[0];
1199  rl_src = LoadValue(rl_src, kCoreReg);
1200  RegLocation rl_dest = InlineTarget(info);
1201  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1202  RegStorage sign_reg = AllocTemp();
1203  // abs(x): let y = x >> 31 (arithmetic); then abs(x) = (x + y) ^ y.
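  // E.g. x = -5: y = -1, (x + y) ^ y = (-6) ^ -1 = 5; for x >= 0, y = 0 and the ops are no-ops.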
1204  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1205  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1206  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1207  StoreValue(rl_dest, rl_result);
1208  return true;
1209}
1210
1211bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1212  if (cu_->instruction_set == kMips) {
1213    // TODO - add Mips implementation
1214    return false;
1215  }
1216  RegLocation rl_src = info->args[0];
1217  rl_src = LoadValueWide(rl_src, kCoreReg);
1218  RegLocation rl_dest = InlineTargetWide(info);
1219  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1220
1221  // If on x86 or if we would clobber a register needed later, just copy the source first.
1222  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 || rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
1223    OpRegCopyWide(rl_result.reg, rl_src.reg);
1224    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1225        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1226        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1227        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1228      // Reuse source registers to avoid running out of temps.
1229      FreeTemp(rl_src.reg);
1230    }
1231    rl_src = rl_result;
1232  }
1233
1234  // abs(x): let y = x >> 31 (arithmetic); then abs(x) = (x + y) ^ y, done across the 64-bit pair below.
1235  RegStorage sign_reg = AllocTemp();
1236  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1237  OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1238  OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1239  OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1240  OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1241  StoreValueWide(rl_dest, rl_result);
1242  return true;
1243}
1244
1245bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
1246  if (cu_->instruction_set == kMips) {
1247    // TODO - add Mips implementation
1248    return false;
1249  }
1250  RegLocation rl_src = info->args[0];
1251  rl_src = LoadValue(rl_src, kCoreReg);
1252  RegLocation rl_dest = InlineTarget(info);
1253  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1254  OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
1255  StoreValue(rl_dest, rl_result);
1256  return true;
1257}
1258
1259bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
1260  if (cu_->instruction_set == kMips) {
1261    // TODO - add Mips implementation
1262    return false;
1263  }
1264  RegLocation rl_src = info->args[0];
1265  rl_src = LoadValueWide(rl_src, kCoreReg);
1266  RegLocation rl_dest = InlineTargetWide(info);
1267  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1268  OpRegCopyWide(rl_result.reg, rl_src.reg);
1269  OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
1270  StoreValueWide(rl_dest, rl_result);
1271  return true;
1272}
1273
1274bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
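  // 64-bit counterpart of the above (e.g. Double.doubleToRawLongBits()/longBitsToDouble()):
  // a wide move of the unchanged bit pattern.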
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

/*
 * Fast String.indexOf(I) & (II).  Tests for simple case of char <= 0xFFFF,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
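  // The string, code point and start index are marshalled into the fixed argument
  // registers kArg0-kArg2, the string is null checked explicitly, and the pIndexOf
  // entrypoint is called directly.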
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
    return false;
  }

  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  RegStorage reg_ptr = TargetReg(kArg0);
  RegStorage reg_char = TargetReg(kArg1);
  RegStorage reg_start = TargetReg(kArg2);

  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    RegLocation rl_start = info->args[2];     // Start index is only present in the (II) flavor.
    LoadValueDirectFixed(rl_start, reg_start);
  }
  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
  GenExplicitNullCheck(reg_ptr, info->opt_flags);
  LIR* high_code_point_branch =
      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
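  // If the code point is not a compile-time constant, values above 0xFFFF branch to the
  // intrinsic launchpad added below, which falls back to the real String.indexOf().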
  // NOTE: not a safepoint
  OpReg(kOpBlx, r_tgt);
  if (!rl_char.is_const) {
    // Add the slow path for code points beyond 0xFFFF.
    DCHECK(high_code_point_branch != nullptr);
    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
    AddIntrinsicLaunchpad(info, high_code_point_branch, resume_tgt);
  } else {
    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
    DCHECK(high_code_point_branch == nullptr);
  }
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

/* Fast String.compareTo(Ljava/lang/String;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
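  // Both strings go into fixed argument registers. Non-x86 targets load the
  // pStringCompareTo entrypoint into a register first; x86/x86-64 call it through a
  // Thread-relative memory operand instead.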
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  RegStorage reg_this = TargetReg(kArg0);
  RegStorage reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  RegStorage r_tgt = (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) ?
      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo)) : RegStorage::InvalidReg();
  GenExplicitNullCheck(reg_this, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
  AddIntrinsicLaunchpad(info, cmp_null_check_branch);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
  }
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
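  // Thread.currentThread(): read the managed peer object out of the runtime Thread, either
  // via the reserved self register (Thumb2/MIPS) or via a Thread-relative memory operand
  // (x86/x86-64).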
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  ThreadOffset<4> offset = Thread::PeerOffset<4>();
  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
  } else {
    CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
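  // sun.misc.Unsafe getInt/getLong (and the volatile variants): load from object + offset,
  // then emit conservative LoadLoad/LoadStore barriers after a volatile load.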
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
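  // For wide loads, x86 can use a base+index addressing mode directly; other targets
  // materialize object + offset into a temp register first.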
  if (is_long) {
    if (cu_->instruction_set == kX86) {
      LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg.GetLow(),
                          rl_result.reg.GetHigh(), kLong, INVALID_SREG);
    } else {
      RegStorage rl_temp_offset = AllocTemp();
      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
      LoadBaseDispWide(rl_temp_offset, 0, rl_result.reg, INVALID_SREG);
      FreeTemp(rl_temp_offset.GetReg());
    }
  } else {
    LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, kWord);
  }

  if (is_volatile) {
    // Without context-sensitive analysis, we must issue the most conservative barriers.
    // In this case, either a load or store may follow so we issue both barriers.
    GenMemBarrier(kLoadLoad);
    GenMemBarrier(kLoadStore);
  }

  if (is_long) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
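  // sun.misc.Unsafe put variants: a StoreStore barrier precedes volatile/ordered stores, a
  // StoreLoad barrier follows volatile ones, and object stores mark the GC card table.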
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    // There might have been a store before this volatile one, so insert a StoreStore barrier.
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    if (cu_->instruction_set == kX86) {
      StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg.GetLow(),
                           rl_value.reg.GetHigh(), kLong, INVALID_SREG);
    } else {
      RegStorage rl_temp_offset = AllocTemp();
      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
      StoreBaseDispWide(rl_temp_offset, 0, rl_value.reg);
      FreeTemp(rl_temp_offset.GetReg());
    }
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, kWord);
  }

  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
  FreeTemp(rl_offset.reg.GetReg());

  if (is_volatile) {
    // A load might follow the volatile store so insert a StoreLoad barrier.
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    MarkGCCard(rl_value.reg, rl_object.reg);
  }
  return true;
}

void Mir2Lir::GenInvoke(CallInfo* info) {
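  // Entry point for all invokes: a call that was already inlined may still owe the receiver
  // null check; otherwise try the method inliner's intrinsic expansion before falling back
  // to the generic call sequence.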
  if ((info->opt_flags & MIR_INLINED) != 0) {
    // Already inlined but we may still need the null check.
    if (info->type != kStatic &&
        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0))  {
      RegLocation rl_obj = LoadValue(info->args[0], kCoreReg);
      GenImmedCheck(kCondEq, rl_obj.reg, 0, kThrowNullPointer);
    }
    return;
  }
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
      ->GenIntrinsic(this, info)) {
    return;
  }
  GenInvokeNoInline(info);
}

void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
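  // Generic call sequence: pick the per-invoke-type next_call_insn state machine, marshal
  // the arguments (interleaving call setup where possible), emit the call, and store any
  // pending MOVE_RESULT.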
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = nullptr;
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
  info->type = static_cast<InvokeType>(method_info.GetSharpType());
  bool fast_path = method_info.FastPath();
  bool skip_this;
  if (info->type == kInterface) {
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = fast_path;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  MethodReference target_method = method_info.GetTargetMethod();
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method, method_info.VTableIndex(),
                                      method_info.DirectCode(), method_info.DirectMethod(),
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, method_info.VTableIndex(),
                                    method_info.DirectCode(), method_info.DirectMethod(),
                                    original_type, skip_this);
  }
  // Finish up any part of the call sequence not interleaved with arg loading.
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
  }
  LIR* call_inst;
  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    if (fast_path) {
      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
        // We can have the linker fix up a relative call.
        call_inst =
          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
      } else {
        call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
      }
    } else {
      ThreadOffset<4> trampoline(-1);
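      // Slow path: dispatch through the access-check trampoline that matches the invoke
      // type, called via a Thread-relative memory operand.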
      switch (info->type) {
      case kInterface:
        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck);
        break;
      case kDirect:
        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
        break;
      case kStatic:
        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
        break;
      case kSuper:
        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
        break;
      case kVirtual:
        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck);
        break;
      default:
        LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);

  ClobberCallerSave();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}

}  // namespace art