gen_invoke.cc revision 081f73e888b3c246cf7635db37b7f1105cf1a2ff
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/string.h"
26#include "mir_to_lir-inl.h"
27#include "x86/codegen_x86.h"
28
29namespace art {
30
/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */
36
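// Attach a slow path for an inlined intrinsic: if the fast path bails out (e.g. a bounds check
// fails), 'branch' transfers here and the call is re-issued as a regular, non-inlined invoke;
// 'resume' (if provided) is where the slow path rejoins the fast path.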
37void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
38  class IntrinsicLaunchpadPath : public Mir2Lir::LIRSlowPath {
39   public:
40    IntrinsicLaunchpadPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
41        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
42    }
43
44    void Compile() {
45      m2l_->ResetRegPool();
46      m2l_->ResetDefTracking();
47      LIR* label = GenerateTargetLabel();
48      label->opcode = kPseudoIntrinsicRetry;
49      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
50      m2l_->GenInvokeNoInline(info_);
51      if (cont_ != nullptr) {
52        m2l_->OpUnconditionalBranch(cont_);
53      }
54    }
55
56   private:
57    CallInfo* const info_;
58  };
59
60  AddSlowPath(new (arena_) IntrinsicLaunchpadPath(this, info, branch, resume));
61}
62
63/*
64 * To save scheduling time, helper calls are broken into two parts: generation of
65 * the helper target address, and the actual call to the helper.  Because x86
66 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
67 * load arguments between the two parts.
68 */
69RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
  return (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64)
      ? RegStorage::InvalidReg() : LoadHelper(helper_offset);
71}
72
73/* NOTE: if r_tgt is a temp, it will be freed following use */
74LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset, bool safepoint_pc,
75                         bool use_link) {
76  LIR* call_inst;
77  OpKind op = use_link ? kOpBlx : kOpBx;
78  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
79    call_inst = OpThreadMem(op, helper_offset);
80  } else {
81    call_inst = OpReg(op, r_tgt);
82    FreeTemp(r_tgt);
83  }
84  if (safepoint_pc) {
85    MarkSafepointPC(call_inst);
86  }
87  return call_inst;
88}
89
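// The CallRuntimeHelper* overloads below all follow the same pattern: marshal their arguments
// into the target's fixed argument registers (kArg0..), clobber the caller-save registers, and
// call the helper whose entrypoint is given by helper_offset.  They differ only in the kinds of
// arguments they accept (immediates, registers, RegLocations, the current Method*).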
90void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<4> helper_offset, int arg0, bool safepoint_pc) {
91  RegStorage r_tgt = CallHelperSetup(helper_offset);
92  LoadConstant(TargetReg(kArg0), arg0);
93  ClobberCallerSave();
94  CallHelper(r_tgt, helper_offset, safepoint_pc);
95}
96
97void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<4> helper_offset, RegStorage arg0,
98                                   bool safepoint_pc) {
99  RegStorage r_tgt = CallHelperSetup(helper_offset);
100  OpRegCopy(TargetReg(kArg0), arg0);
101  ClobberCallerSave();
102  CallHelper(r_tgt, helper_offset, safepoint_pc);
103}
104
105void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<4> helper_offset, RegLocation arg0,
106                                           bool safepoint_pc) {
107  RegStorage r_tgt = CallHelperSetup(helper_offset);
108  if (arg0.wide == 0) {
109    LoadValueDirectFixed(arg0, TargetReg(kArg0));
110  } else {
111    RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
112    LoadValueDirectWideFixed(arg0, r_tmp);
113  }
114  ClobberCallerSave();
115  CallHelper(r_tgt, helper_offset, safepoint_pc);
116}
117
118void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<4> helper_offset, int arg0, int arg1,
119                                      bool safepoint_pc) {
120  RegStorage r_tgt = CallHelperSetup(helper_offset);
121  LoadConstant(TargetReg(kArg0), arg0);
122  LoadConstant(TargetReg(kArg1), arg1);
123  ClobberCallerSave();
124  CallHelper(r_tgt, helper_offset, safepoint_pc);
125}
126
127void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<4> helper_offset, int arg0,
128                                              RegLocation arg1, bool safepoint_pc) {
129  RegStorage r_tgt = CallHelperSetup(helper_offset);
130  if (arg1.wide == 0) {
131    LoadValueDirectFixed(arg1, TargetReg(kArg1));
132  } else {
133    RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
134    LoadValueDirectWideFixed(arg1, r_tmp);
135  }
136  LoadConstant(TargetReg(kArg0), arg0);
137  ClobberCallerSave();
138  CallHelper(r_tgt, helper_offset, safepoint_pc);
139}
140
141void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<4> helper_offset, RegLocation arg0,
142                                              int arg1, bool safepoint_pc) {
143  RegStorage r_tgt = CallHelperSetup(helper_offset);
144  LoadValueDirectFixed(arg0, TargetReg(kArg0));
145  LoadConstant(TargetReg(kArg1), arg1);
146  ClobberCallerSave();
147  CallHelper(r_tgt, helper_offset, safepoint_pc);
148}
149
150void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<4> helper_offset, int arg0, RegStorage arg1,
151                                      bool safepoint_pc) {
152  RegStorage r_tgt = CallHelperSetup(helper_offset);
153  OpRegCopy(TargetReg(kArg1), arg1);
154  LoadConstant(TargetReg(kArg0), arg0);
155  ClobberCallerSave();
156  CallHelper(r_tgt, helper_offset, safepoint_pc);
157}
158
159void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<4> helper_offset, RegStorage arg0, int arg1,
160                                      bool safepoint_pc) {
161  RegStorage r_tgt = CallHelperSetup(helper_offset);
162  OpRegCopy(TargetReg(kArg0), arg0);
163  LoadConstant(TargetReg(kArg1), arg1);
164  ClobberCallerSave();
165  CallHelper(r_tgt, helper_offset, safepoint_pc);
166}
167
168void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<4> helper_offset, int arg0,
169                                         bool safepoint_pc) {
170  RegStorage r_tgt = CallHelperSetup(helper_offset);
171  LoadCurrMethodDirect(TargetReg(kArg1));
172  LoadConstant(TargetReg(kArg0), arg0);
173  ClobberCallerSave();
174  CallHelper(r_tgt, helper_offset, safepoint_pc);
175}
176
177void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<4> helper_offset, RegStorage arg0,
178                                         bool safepoint_pc) {
179  RegStorage r_tgt = CallHelperSetup(helper_offset);
180  DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
181  if (TargetReg(kArg0) != arg0) {
182    OpRegCopy(TargetReg(kArg0), arg0);
183  }
184  LoadCurrMethodDirect(TargetReg(kArg1));
185  ClobberCallerSave();
186  CallHelper(r_tgt, helper_offset, safepoint_pc);
187}
188
189void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<4> helper_offset, RegStorage arg0,
190                                                    RegLocation arg2, bool safepoint_pc) {
191  RegStorage r_tgt = CallHelperSetup(helper_offset);
192  DCHECK_NE(TargetReg(kArg1).GetReg(), arg0.GetReg());
193  if (TargetReg(kArg0) != arg0) {
194    OpRegCopy(TargetReg(kArg0), arg0);
195  }
196  LoadCurrMethodDirect(TargetReg(kArg1));
197  LoadValueDirectFixed(arg2, TargetReg(kArg2));
198  ClobberCallerSave();
199  CallHelper(r_tgt, helper_offset, safepoint_pc);
200}
201
202void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<4> helper_offset,
203                                                      RegLocation arg0, RegLocation arg1,
204                                                      bool safepoint_pc) {
205  RegStorage r_tgt = CallHelperSetup(helper_offset);
206  if (arg0.wide == 0) {
207    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
208    if (arg1.wide == 0) {
209      if (cu_->instruction_set == kMips) {
210        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
211      } else {
212        LoadValueDirectFixed(arg1, TargetReg(kArg1));
213      }
214    } else {
215      if (cu_->instruction_set == kMips) {
216        RegStorage r_tmp;
217        if (arg1.fp) {
218          r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
219        } else {
220          r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
221        }
222        LoadValueDirectWideFixed(arg1, r_tmp);
223      } else {
224        RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
225        LoadValueDirectWideFixed(arg1, r_tmp);
226      }
227    }
228  } else {
229    RegStorage r_tmp;
230    if (arg0.fp) {
231      r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1));
232    } else {
233      r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
234    }
235    LoadValueDirectWideFixed(arg0, r_tmp);
236    if (arg1.wide == 0) {
237      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
238    } else {
239      RegStorage r_tmp;
240      if (arg1.fp) {
241        r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
242      } else {
243        r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
244      }
245      LoadValueDirectWideFixed(arg1, r_tmp);
246    }
247  }
248  ClobberCallerSave();
249  CallHelper(r_tgt, helper_offset, safepoint_pc);
250}
251
252void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<4> helper_offset, RegStorage arg0,
253                                      RegStorage arg1, bool safepoint_pc) {
254  RegStorage r_tgt = CallHelperSetup(helper_offset);
255  DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg());  // check copy into arg0 won't clobber arg1
256  OpRegCopy(TargetReg(kArg0), arg0);
257  OpRegCopy(TargetReg(kArg1), arg1);
258  ClobberCallerSave();
259  CallHelper(r_tgt, helper_offset, safepoint_pc);
260}
261
262void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<4> helper_offset, RegStorage arg0,
263                                         RegStorage arg1, int arg2, bool safepoint_pc) {
264  RegStorage r_tgt = CallHelperSetup(helper_offset);
265  DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg());  // check copy into arg0 won't clobber arg1
266  OpRegCopy(TargetReg(kArg0), arg0);
267  OpRegCopy(TargetReg(kArg1), arg1);
268  LoadConstant(TargetReg(kArg2), arg2);
269  ClobberCallerSave();
270  CallHelper(r_tgt, helper_offset, safepoint_pc);
271}
272
273void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<4> helper_offset,
274                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
275  RegStorage r_tgt = CallHelperSetup(helper_offset);
276  LoadValueDirectFixed(arg2, TargetReg(kArg2));
277  LoadCurrMethodDirect(TargetReg(kArg1));
278  LoadConstant(TargetReg(kArg0), arg0);
279  ClobberCallerSave();
280  CallHelper(r_tgt, helper_offset, safepoint_pc);
281}
282
283void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<4> helper_offset, int arg0,
284                                            int arg2, bool safepoint_pc) {
285  RegStorage r_tgt = CallHelperSetup(helper_offset);
286  LoadCurrMethodDirect(TargetReg(kArg1));
287  LoadConstant(TargetReg(kArg2), arg2);
288  LoadConstant(TargetReg(kArg0), arg0);
289  ClobberCallerSave();
290  CallHelper(r_tgt, helper_offset, safepoint_pc);
291}
292
293void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<4> helper_offset,
294                                                         int arg0, RegLocation arg1,
295                                                         RegLocation arg2, bool safepoint_pc) {
296  RegStorage r_tgt = CallHelperSetup(helper_offset);
297  DCHECK_EQ(arg1.wide, 0U);
298  LoadValueDirectFixed(arg1, TargetReg(kArg1));
299  if (arg2.wide == 0) {
300    LoadValueDirectFixed(arg2, TargetReg(kArg2));
301  } else {
302    RegStorage r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
303    LoadValueDirectWideFixed(arg2, r_tmp);
304  }
305  LoadConstant(TargetReg(kArg0), arg0);
306  ClobberCallerSave();
307  CallHelper(r_tgt, helper_offset, safepoint_pc);
308}
309
310void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<4> helper_offset,
311                                                                 RegLocation arg0, RegLocation arg1,
312                                                                 RegLocation arg2,
313                                                                 bool safepoint_pc) {
314  RegStorage r_tgt = CallHelperSetup(helper_offset);
315  DCHECK_EQ(arg0.wide, 0U);
316  LoadValueDirectFixed(arg0, TargetReg(kArg0));
317  DCHECK_EQ(arg1.wide, 0U);
318  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  DCHECK_EQ(arg2.wide, 0U);
320  LoadValueDirectFixed(arg2, TargetReg(kArg2));
321  ClobberCallerSave();
322  CallHelper(r_tgt, helper_offset, safepoint_pc);
323}
324
/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments,
 * with one location record per word of argument.
 */
333void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
334  /*
335   * Dummy up a RegLocation for the incoming Method*
336   * It will attempt to keep kArg0 live (or copy it to home location
337   * if promoted).
338   */
339  RegLocation rl_src = rl_method;
340  rl_src.location = kLocPhysReg;
341  rl_src.reg = TargetReg(kArg0);
342  rl_src.home = false;
343  MarkLive(rl_src.reg, rl_src.s_reg_low);
344  StoreValue(rl_method, rl_src);
345  // If Method* has been promoted, explicitly flush
346  if (rl_method.location == kLocPhysReg) {
347    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
348  }
349
350  if (cu_->num_ins == 0) {
351    return;
352  }
353
354  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
355  /*
356   * Copy incoming arguments to their proper home locations.
357   * NOTE: an older version of dx had an issue in which
358   * it would reuse static method argument registers.
359   * This could result in the same Dalvik virtual register
360   * being promoted to both core and fp regs. To account for this,
361   * we only copy to the corresponding promoted physical register
362   * if it matches the type of the SSA name for the incoming
363   * argument.  It is also possible that long and double arguments
364   * end up half-promoted.  In those cases, we must flush the promoted
365   * half to memory as well.
366   */
367  for (int i = 0; i < cu_->num_ins; i++) {
368    PromotionMap* v_map = &promotion_map_[start_vreg + i];
369    RegStorage reg = GetArgMappingToPhysicalReg(i);
370
371    if (reg.Valid()) {
372      // If arriving in register
373      bool need_flush = true;
374      RegLocation* t_loc = &ArgLocs[i];
375      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
376        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
377        need_flush = false;
378      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
379        OpRegCopy(RegStorage::Solo32(v_map->FpReg), reg);
380        need_flush = false;
381      } else {
382        need_flush = true;
383      }
384
385      // For wide args, force flush if not fully promoted
386      if (t_loc->wide) {
387        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is it only half promoted?
389        need_flush |= (p_map->core_location != v_map->core_location) ||
390            (p_map->fp_location != v_map->fp_location);
391        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
392          /*
393           * In Arm, a double is represented as a pair of consecutive single float
394           * registers starting at an even number.  It's possible that both Dalvik vRegs
395           * representing the incoming double were independently promoted as singles - but
396           * not in a form usable as a double.  If so, we need to flush - even though the
397           * incoming arg appears fully in register.  At this point in the code, both
398           * halves of the double are promoted.  Make sure they are in a usable form.
399           */
400          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
401          int low_reg = promotion_map_[lowreg_index].FpReg;
402          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
403          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
404            need_flush = true;
405          }
406        }
407      }
408      if (need_flush) {
409        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord);
410      }
411    } else {
412      // If arriving in frame & promoted
413      if (v_map->core_location == kLocPhysReg) {
414        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
415                     RegStorage::Solo32(v_map->core_reg));
416      }
417      if (v_map->fp_location == kLocPhysReg) {
418        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
419      }
420    }
421  }
422}
423
424/*
425 * Bit of a hack here - in the absence of a real scheduling pass,
426 * emit the next instruction in static & direct invoke sequences.
427 */
428static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
429                          int state, const MethodReference& target_method,
430                          uint32_t unused,
431                          uintptr_t direct_code, uintptr_t direct_method,
432                          InvokeType type) {
433  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
434  if (direct_code != 0 && direct_method != 0) {
435    switch (state) {
436    case 0:  // Get the current Method* [sets kArg0]
437      if (direct_code != static_cast<unsigned int>(-1)) {
438        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
439          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
440        }
441      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
442        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
443      }
444      if (direct_method != static_cast<unsigned int>(-1)) {
445        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
446      } else {
447        cg->LoadMethodAddress(target_method, type, kArg0);
448      }
449      break;
450    default:
451      return -1;
452    }
453  } else {
454    switch (state) {
455    case 0:  // Get the current Method* [sets kArg0]
456      // TUNING: we can save a reg copy if Method* has been promoted.
457      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
458      break;
459    case 1:  // Get method->dex_cache_resolved_methods_
460      cg->LoadWordDisp(cg->TargetReg(kArg0),
461                       mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
462                       cg->TargetReg(kArg0));
463      // Set up direct code if known.
464      if (direct_code != 0) {
465        if (direct_code != static_cast<unsigned int>(-1)) {
466          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
467        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
468          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
469          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
470        }
471      }
472      break;
473    case 2:  // Grab target method*
474      CHECK_EQ(cu->dex_file, target_method.dex_file);
475      cg->LoadWordDisp(cg->TargetReg(kArg0),
476                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
477                       (target_method.dex_method_index * 4), cg->TargetReg(kArg0));
478      break;
479    case 3:  // Grab the code from the method*
480      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
481        if (direct_code == 0) {
482          cg->LoadWordDisp(cg->TargetReg(kArg0),
483                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
484                           cg->TargetReg(kInvokeTgt));
485        }
486        break;
487      }
488      // Intentional fallthrough for x86
489    default:
490      return -1;
491    }
492  }
493  return state + 1;
494}
495
496/*
497 * Bit of a hack here - in the absence of a real scheduling pass,
498 * emit the next instruction in a virtual invoke sequence.
499 * We can use kLr as a temp prior to target address loading
500 * Note also that we'll load the first argument ("this") into
501 * kArg1 here rather than the standard LoadArgRegs.
502 */
503static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
504                         int state, const MethodReference& target_method,
505                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
506                         InvokeType unused3) {
507  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
508  /*
509   * This is the fast path in which the target virtual method is
510   * fully resolved at compile time.
511   */
512  switch (state) {
513    case 0: {  // Get "this" [set kArg1]
514      RegLocation  rl_arg = info->args[0];
515      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
516      break;
517    }
518    case 1:  // Is "this" null? [use kArg1]
519      cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
520      // get this->klass_ [use kArg1, set kInvokeTgt]
521      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
522                       cg->TargetReg(kInvokeTgt));
523      cg->MarkPossibleNullPointerException(info->opt_flags);
524      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
526      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
527                       cg->TargetReg(kInvokeTgt));
528      break;
529    case 3:  // Get target method [use kInvokeTgt, set kArg0]
530      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
531                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
532                       cg->TargetReg(kArg0));
533      break;
534    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
535      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
536        cg->LoadWordDisp(cg->TargetReg(kArg0),
537                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
538                         cg->TargetReg(kInvokeTgt));
539        break;
540      }
541      // Intentional fallthrough for X86
542    default:
543      return -1;
544  }
545  return state + 1;
546}
547
/*
 * Emit the next instruction in an invoke-interface sequence. This will do a lookup in the
 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
 * more than one interface method maps to the same index. Note also that we'll load the first
 * argument ("this") into kArg1 here rather than via the standard LoadArgRegs.
 */
554static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
555                                 const MethodReference& target_method,
556                                 uint32_t method_idx, uintptr_t unused,
557                                 uintptr_t direct_method, InvokeType unused2) {
558  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
559
560  switch (state) {
561    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
562      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
563      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
564      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
565        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
566      }
567      break;
568    case 1: {  // Get "this" [set kArg1]
569      RegLocation  rl_arg = info->args[0];
570      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
571      break;
572    }
573    case 2:  // Is "this" null? [use kArg1]
574      cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
575      // Get this->klass_ [use kArg1, set kInvokeTgt]
576      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
577                       cg->TargetReg(kInvokeTgt));
578      cg->MarkPossibleNullPointerException(info->opt_flags);
579      break;
580    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
581      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
582                       cg->TargetReg(kInvokeTgt));
583      break;
584    case 4:  // Get target method [use kInvokeTgt, set kArg0]
585      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
586                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
587                       cg->TargetReg(kArg0));
588      break;
589    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
590      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
591        cg->LoadWordDisp(cg->TargetReg(kArg0),
592                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
593                         cg->TargetReg(kInvokeTgt));
594        break;
595      }
596      // Intentional fallthrough for X86
597    default:
598      return -1;
599  }
600  return state + 1;
601}
602
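// Shared state machine for the slow-path invoke variants below: load the trampoline target into
// kInvokeTgt (except on x86, which calls through thread memory) and the method index into kArg0.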
603static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<4> trampoline,
604                            int state, const MethodReference& target_method,
605                            uint32_t method_idx) {
606  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
611  if (state == 0) {
612    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
613      // Load trampoline target
614      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
615    }
616    // Load kArg0 with method index
617    CHECK_EQ(cu->dex_file, target_method.dex_file);
618    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
619    return 1;
620  }
621  return -1;
622}
623
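// The Next*CallInsnSP variants below handle invokes whose callee could not be fully resolved at
// compile time: each simply routes the call through the matching *TrampolineWithAccessCheck
// runtime entrypoint via NextInvokeInsnSP.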
624static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
625                                int state,
626                                const MethodReference& target_method,
627                                uint32_t unused, uintptr_t unused2,
628                                uintptr_t unused3, InvokeType unused4) {
629  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
630  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
631}
632
633static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
634                                const MethodReference& target_method,
635                                uint32_t unused, uintptr_t unused2,
636                                uintptr_t unused3, InvokeType unused4) {
637  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
638  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
639}
640
641static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
642                               const MethodReference& target_method,
643                               uint32_t unused, uintptr_t unused2,
644                               uintptr_t unused3, InvokeType unused4) {
645  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
646  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
647}
648
649static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
650                           const MethodReference& target_method,
651                           uint32_t unused, uintptr_t unused2,
652                           uintptr_t unused3, InvokeType unused4) {
653  ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck);
654  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
655}
656
657static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
658                                                CallInfo* info, int state,
659                                                const MethodReference& target_method,
660                                                uint32_t unused, uintptr_t unused2,
661                                                uintptr_t unused3, InvokeType unused4) {
662  ThreadOffset<4> trampoline =
663      QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck);
664  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
665}
666
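// Load up to three argument words into the fixed argument registers kArg1..kArg3 (kArg1 is
// skipped when 'this' has already been loaded), calling next_call_insn between loads so the
// invoke sequence keeps making progress.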
667int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
668                         NextCallInsn next_call_insn,
669                         const MethodReference& target_method,
670                         uint32_t vtable_idx, uintptr_t direct_code,
671                         uintptr_t direct_method, InvokeType type, bool skip_this) {
672  int last_arg_reg = TargetReg(kArg3).GetReg();
673  int next_reg = TargetReg(kArg1).GetReg();
674  int next_arg = 0;
675  if (skip_this) {
676    next_reg++;
677    next_arg++;
678  }
679  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
680    RegLocation rl_arg = info->args[next_arg++];
681    rl_arg = UpdateRawLoc(rl_arg);
682    if (rl_arg.wide && (next_reg <= TargetReg(kArg2).GetReg())) {
683      RegStorage r_tmp(RegStorage::k64BitPair, next_reg, next_reg + 1);
684      LoadValueDirectWideFixed(rl_arg, r_tmp);
685      next_reg++;
686      next_arg++;
687    } else {
688      if (rl_arg.wide) {
689        rl_arg = NarrowRegLoc(rl_arg);
690        rl_arg.is_const = false;
691      }
692      LoadValueDirectFixed(rl_arg, RegStorage::Solo32(next_reg));
693    }
694    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
695                                direct_code, direct_method, type);
696  }
697  return call_state;
698}
699
700/*
701 * Load up to 5 arguments, the first three of which will be in
702 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
703 * and as part of the load sequence, it must be replaced with
704 * the target method pointer.  Note, this may also be called
705 * for "range" variants if the number of arguments is 5 or fewer.
706 */
707int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
708                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
709                                  const MethodReference& target_method,
710                                  uint32_t vtable_idx, uintptr_t direct_code,
711                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
712  RegLocation rl_arg;
713
714  /* If no arguments, just return */
715  if (info->num_arg_words == 0)
716    return call_state;
717
718  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
719                              direct_code, direct_method, type);
720
721  DCHECK_LE(info->num_arg_words, 5);
722  if (info->num_arg_words > 3) {
723    int32_t next_use = 3;
724    // Detect special case of wide arg spanning arg3/arg4
725    RegLocation rl_use0 = info->args[0];
726    RegLocation rl_use1 = info->args[1];
727    RegLocation rl_use2 = info->args[2];
728    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
729      RegStorage reg;
730      // Wide spans, we need the 2nd half of uses[2].
731      rl_arg = UpdateLocWide(rl_use2);
732      if (rl_arg.location == kLocPhysReg) {
733        reg = rl_arg.reg.GetHigh();
734      } else {
        // kArg2 & kArg3 can safely be used here
736        reg = TargetReg(kArg3);
737        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
738        call_state = next_call_insn(cu_, info, call_state, target_method,
739                                    vtable_idx, direct_code, direct_method, type);
740      }
741      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
742      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
743                                  direct_code, direct_method, type);
744      next_use++;
745    }
746    // Loop through the rest
747    while (next_use < info->num_arg_words) {
748      RegStorage low_reg;
749      RegStorage high_reg;
750      rl_arg = info->args[next_use];
751      rl_arg = UpdateRawLoc(rl_arg);
752      if (rl_arg.location == kLocPhysReg) {
753        if (rl_arg.wide) {
754          low_reg = rl_arg.reg.GetLow();
755          high_reg = rl_arg.reg.GetHigh();
756        } else {
757          low_reg = rl_arg.reg;
758        }
759      } else {
760        low_reg = TargetReg(kArg2);
761        if (rl_arg.wide) {
762          high_reg = TargetReg(kArg3);
763          LoadValueDirectWideFixed(rl_arg, RegStorage::MakeRegPair(low_reg, high_reg));
764        } else {
765          LoadValueDirectFixed(rl_arg, low_reg);
766        }
767        call_state = next_call_insn(cu_, info, call_state, target_method,
768                                    vtable_idx, direct_code, direct_method, type);
769      }
770      int outs_offset = (next_use + 1) * 4;
771      if (rl_arg.wide) {
772        StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg));
773        next_use += 2;
774      } else {
775        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
776        next_use++;
777      }
778      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
779                               direct_code, direct_method, type);
780    }
781  }
782
783  call_state = LoadArgRegs(info, call_state, next_call_insn,
784                           target_method, vtable_idx, direct_code, direct_method,
785                           type, skip_this);
786
787  if (pcrLabel) {
788    if (Runtime::Current()->ExplicitNullChecks()) {
789      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
790    } else {
791      *pcrLabel = nullptr;
792      // In lieu of generating a check for kArg1 being null, we need to
793      // perform a load when doing implicit checks.
794      RegStorage tmp = AllocTemp();
795      LoadWordDisp(TargetReg(kArg1), 0, tmp);
796      MarkPossibleNullPointerException(info->opt_flags);
797      FreeTemp(tmp);
798    }
799  }
800  return call_state;
801}
802
803/*
804 * May have 0+ arguments (also used for jumbo).  Note that
805 * source virtual registers may be in physical registers, so may
806 * need to be flushed to home location before copying.  This
807 * applies to arg3 and above (see below).
808 *
809 * Two general strategies:
810 *    If < 20 arguments
811 *       Pass args 3-18 using vldm/vstm block copy
812 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
813 *    If 20+ arguments
814 *       Pass args arg19+ using memcpy block copy
815 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
816 *
817 */
818int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
819                                LIR** pcrLabel, NextCallInsn next_call_insn,
820                                const MethodReference& target_method,
821                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
822                                InvokeType type, bool skip_this) {
823  // If we can treat it as non-range (Jumbo ops will use range form)
824  if (info->num_arg_words <= 5)
825    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
826                                next_call_insn, target_method, vtable_idx,
827                                direct_code, direct_method, type, skip_this);
828  /*
829   * First load the non-register arguments.  Both forms expect all
830   * of the source arguments to be in their home frame location, so
831   * scan the s_reg names and flush any that have been promoted to
832   * frame backing storage.
833   */
834  // Scan the rest of the args - if in phys_reg flush to memory
835  for (int next_arg = 0; next_arg < info->num_arg_words;) {
836    RegLocation loc = info->args[next_arg];
837    if (loc.wide) {
838      loc = UpdateLocWide(loc);
839      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
840        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
841      }
842      next_arg += 2;
843    } else {
844      loc = UpdateLoc(loc);
845      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
846        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
847      }
848      next_arg++;
849    }
850  }
851
852  // Logic below assumes that Method pointer is at offset zero from SP.
853  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
854
855  // The first 3 arguments are passed via registers.
856  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
857  // get size of uintptr_t or size of object reference according to model being used.
858  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
859  int start_offset = SRegOffset(info->args[3].s_reg_low);
860  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
861  DCHECK_GT(regs_left_to_pass_via_stack, 0);
862
863  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
864    // Use vldm/vstm pair using kArg3 as a temp
865    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
866                             direct_code, direct_method, type);
867    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
868    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
869    // TUNING: loosen barrier
870    ld->u.m.def_mask = ENCODE_ALL;
871    SetMemRefType(ld, true /* is_load */, kDalvikReg);
872    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
873                             direct_code, direct_method, type);
874    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
875    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
876                             direct_code, direct_method, type);
877    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
878    SetMemRefType(st, false /* is_load */, kDalvikReg);
879    st->u.m.def_mask = ENCODE_ALL;
880    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
881                             direct_code, direct_method, type);
882  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
883    int current_src_offset = start_offset;
884    int current_dest_offset = outs_offset;
885
886    while (regs_left_to_pass_via_stack > 0) {
887      // This is based on the knowledge that the stack itself is 16-byte aligned.
888      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
889      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
890      size_t bytes_to_move;
891
      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, do a
       * 128-bit move, because we won't get another chance to align. If there are more than
       * 4 registers left to move, consider a 128-bit move only if either src or dest is aligned.
       * We do this because we could potentially do a smaller move to align first.
       */
898      if (regs_left_to_pass_via_stack == 4 ||
899          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
900        // Moving 128-bits via xmm register.
901        bytes_to_move = sizeof(uint32_t) * 4;
902
903        // Allocate a free xmm temp. Since we are working through the calling sequence,
904        // we expect to have an xmm temporary available.
905        RegStorage temp = AllocTempDouble();
906        CHECK_GT(temp.GetLowReg(), 0);
907
908        LIR* ld1 = nullptr;
909        LIR* ld2 = nullptr;
910        LIR* st1 = nullptr;
911        LIR* st2 = nullptr;
912
913        /*
914         * The logic is similar for both loads and stores. If we have 16-byte alignment,
915         * do an aligned move. If we have 8-byte alignment, then do the move in two
916         * parts. This approach prevents possible cache line splits. Finally, fall back
917         * to doing an unaligned move. In most cases we likely won't split the cache
918         * line but we cannot prove it and thus take a conservative approach.
919         */
920        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
921        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
922
923        if (src_is_16b_aligned) {
924          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
925        } else if (src_is_8b_aligned) {
926          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
927          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
928                            kMovHi128FP);
929        } else {
930          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
931        }
932
933        if (dest_is_16b_aligned) {
934          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
935        } else if (dest_is_8b_aligned) {
936          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
937          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
938                            temp, kMovHi128FP);
939        } else {
940          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
941        }
942
943        // TODO If we could keep track of aliasing information for memory accesses that are wider
944        // than 64-bit, we wouldn't need to set up a barrier.
945        if (ld1 != nullptr) {
946          if (ld2 != nullptr) {
947            // For 64-bit load we can actually set up the aliasing information.
948            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
949            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
950          } else {
951            // Set barrier for 128-bit load.
952            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
953            ld1->u.m.def_mask = ENCODE_ALL;
954          }
955        }
956        if (st1 != nullptr) {
957          if (st2 != nullptr) {
958            // For 64-bit store we can actually set up the aliasing information.
959            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
960            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
961          } else {
962            // Set barrier for 128-bit store.
963            SetMemRefType(st1, false /* is_load */, kDalvikReg);
964            st1->u.m.def_mask = ENCODE_ALL;
965          }
966        }
967
968        // Free the temporary used for the data movement.
        // CLEANUP: temp is currently a bogus pair, eliminate the extra free when updated.
970        FreeTemp(temp.GetLow());
971        FreeTemp(temp.GetHigh());
972      } else {
973        // Moving 32-bits via general purpose register.
974        bytes_to_move = sizeof(uint32_t);
975
976        // Instead of allocating a new temp, simply reuse one of the registers being used
977        // for argument passing.
978        RegStorage temp = TargetReg(kArg3);
979
980        // Now load the argument VR and store to the outs.
981        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
982        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
983      }
984
985      current_src_offset += bytes_to_move;
986      current_dest_offset += bytes_to_move;
987      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
988    }
989  } else {
990    // Generate memcpy
991    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
992    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
993    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0),
994                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
995  }
996
997  call_state = LoadArgRegs(info, call_state, next_call_insn,
998                           target_method, vtable_idx, direct_code, direct_method,
999                           type, skip_this);
1000
1001  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1002                           direct_code, direct_method, type);
1003  if (pcrLabel) {
1004    if (Runtime::Current()->ExplicitNullChecks()) {
1005      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
1006    } else {
1007      *pcrLabel = nullptr;
1008      // In lieu of generating a check for kArg1 being null, we need to
1009      // perform a load when doing implicit checks.
1010      RegStorage tmp = AllocTemp();
1011      LoadWordDisp(TargetReg(kArg1), 0, tmp);
1012      MarkPossibleNullPointerException(info->opt_flags);
1013      FreeTemp(tmp);
1014    }
1015  }
1016  return call_state;
1017}
1018
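// Determine where an inlined intrinsic should leave its result: the location already assigned to
// the call's result if there is one, otherwise the standard (wide) return location.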
1019RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
1020  RegLocation res;
1021  if (info->result.location == kLocInvalid) {
1022    res = GetReturn(false);
1023  } else {
1024    res = info->result;
1025  }
1026  return res;
1027}
1028
1029RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
1030  RegLocation res;
1031  if (info->result.location == kLocInvalid) {
1032    res = GetReturnWide(false);
1033  } else {
1034    res = info->result;
1035  }
1036  return res;
1037}
1038
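// Inlined String.charAt(): null-check the string, range-check the index against count_, then load
// the character directly from the backing char array (value_ + offset_ + index).  A bounds-check
// failure branches to an intrinsic launchpad that redoes the call out of line.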
1039bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
1040  if (cu_->instruction_set == kMips) {
1041    // TODO - add Mips implementation
1042    return false;
1043  }
1044  // Location of reference to data array
1045  int value_offset = mirror::String::ValueOffset().Int32Value();
1046  // Location of count
1047  int count_offset = mirror::String::CountOffset().Int32Value();
1048  // Starting offset within data array
1049  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_
1051  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
1052
1053  RegLocation rl_obj = info->args[0];
1054  RegLocation rl_idx = info->args[1];
1055  rl_obj = LoadValue(rl_obj, kCoreReg);
1056  // X86 wants to avoid putting a constant index into a register.
  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
1058    rl_idx = LoadValue(rl_idx, kCoreReg);
1059  }
1060  RegStorage reg_max;
1061  GenNullCheck(rl_obj.reg, info->opt_flags);
1062  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1063  LIR* range_check_branch = nullptr;
1064  RegStorage reg_off;
1065  RegStorage reg_ptr;
1066  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1067    reg_off = AllocTemp();
1068    reg_ptr = AllocTemp();
1069    if (range_check) {
1070      reg_max = AllocTemp();
1071      LoadWordDisp(rl_obj.reg, count_offset, reg_max);
1072      MarkPossibleNullPointerException(info->opt_flags);
1073    }
1074    LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
1075    MarkPossibleNullPointerException(info->opt_flags);
1076    LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
1077    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation.
1079      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1080      FreeTemp(reg_max);
1081      range_check_branch = OpCondBranch(kCondUge, nullptr);
1082    }
1083    OpRegImm(kOpAdd, reg_ptr, data_offset);
1084  } else {
1085    if (range_check) {
1086      // On x86, we can compare to memory directly
      // Set up a launch pad to allow retry in case of bounds violation.
1088      if (rl_idx.is_const) {
1089        range_check_branch = OpCmpMemImmBranch(
1090            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
1091            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
1092      } else {
1093        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
1094        range_check_branch = OpCondBranch(kCondUge, nullptr);
1095      }
1096    }
1097    reg_off = AllocTemp();
1098    reg_ptr = AllocTemp();
1099    LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
1100    LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
1101  }
1102  if (rl_idx.is_const) {
1103    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1104  } else {
1105    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1106  }
1107  FreeTemp(rl_obj.reg);
1108  if (rl_idx.location == kLocPhysReg) {
1109    FreeTemp(rl_idx.reg);
1110  }
1111  RegLocation rl_dest = InlineTarget(info);
1112  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1113  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1114    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1115  } else {
1116    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
1117                        RegStorage::InvalidReg(), kUnsignedHalf, INVALID_SREG);
1118  }
1119  FreeTemp(reg_off);
1120  FreeTemp(reg_ptr);
1121  StoreValue(rl_dest, rl_result);
1122  if (range_check) {
1123    DCHECK(range_check_branch != nullptr);
1124    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1125    AddIntrinsicLaunchpad(info, range_check_branch);
1126  }
1127  return true;
1128}
1129
// Generates an inlined String.isEmpty() or String.length().
1131bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1132  if (cu_->instruction_set == kMips) {
1133    // TODO - add Mips implementation
1134    return false;
1135  }
1136  // dst = src.length();
1137  RegLocation rl_obj = info->args[0];
1138  rl_obj = LoadValue(rl_obj, kCoreReg);
1139  RegLocation rl_dest = InlineTarget(info);
1140  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1141  GenNullCheck(rl_obj.reg, info->opt_flags);
1142  LoadWordDisp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1143  MarkPossibleNullPointerException(info->opt_flags);
1144  if (is_empty) {
1145    // dst = (dst == 0);
1146    if (cu_->instruction_set == kThumb2) {
1147      RegStorage t_reg = AllocTemp();
1148      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1149      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1150    } else {
1151      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1152      OpRegImm(kOpSub, rl_result.reg, 1);
1153      OpRegImm(kOpLsr, rl_result.reg, 31);
1154    }
1155  }
1156  StoreValue(rl_dest, rl_result);
1157  return true;
1158}
1159
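// Inlined reverseBytes(): uses the target's byte-reversal ops (kOpRev/kOpRevsh).  For the long
// variant, each word is byte-reversed into the opposite half of the result pair.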
1160bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1161  if (cu_->instruction_set == kMips) {
1162    // TODO - add Mips implementation
1163    return false;
1164  }
1165  RegLocation rl_src_i = info->args[0];
1166  RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1167  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1168  if (size == kLong) {
1169    RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
1170    RegStorage r_i_low = rl_i.reg.GetLow();
1171    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
      // The first REV will clobber rl_result.reg.GetLowReg(); save the value in a temp for the second REV.
1173      r_i_low = AllocTemp();
1174      OpRegCopy(r_i_low, rl_i.reg);
1175    }
1176    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1177    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1178    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1179      FreeTemp(r_i_low);
1180    }
1181    StoreValueWide(rl_dest, rl_result);
1182  } else {
1183    DCHECK(size == kWord || size == kSignedHalf);
1184    OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
1185    RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
1186    OpRegReg(op, rl_result.reg, rl_i.reg);
1187    StoreValue(rl_dest, rl_result);
1188  }
1189  return true;
1190}
1191
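// Inlined Math.abs(int), branch-free: y = x >> 31 (all ones if x is negative, zero otherwise),
// then abs = (x + y) ^ y.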
1192bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1193  if (cu_->instruction_set == kMips) {
1194    // TODO - add Mips implementation
1195    return false;
1196  }
1197  RegLocation rl_src = info->args[0];
1198  rl_src = LoadValue(rl_src, kCoreReg);
1199  RegLocation rl_dest = InlineTarget(info);
1200  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1201  RegStorage sign_reg = AllocTemp();
  // abs(x): y = x >> 31 (arithmetic), abs = (x + y) ^ y.
1203  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1204  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1205  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1206  StoreValue(rl_dest, rl_result);
1207  return true;
1208}
1209
1210bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1211  if (cu_->instruction_set == kMips) {
1212    // TODO - add Mips implementation
1213    return false;
1214  }
1215  RegLocation rl_src = info->args[0];
1216  rl_src = LoadValueWide(rl_src, kCoreReg);
1217  RegLocation rl_dest = InlineTargetWide(info);
1218  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1219
1220  // If on x86 or if we would clobber a register needed later, just copy the source first.
  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
      rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
1222    OpRegCopyWide(rl_result.reg, rl_src.reg);
1223    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1224        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1225        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1226        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1227      // Reuse source registers to avoid running out of temps.
1228      FreeTemp(rl_src.reg);
1229    }
1230    rl_src = rl_result;
1231  }
1232
  // abs(x): y = x >> 31 of the high word (sign), abs = (x + y) ^ y applied across the register pair.
1234  RegStorage sign_reg = AllocTemp();
1235  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1236  OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1237  OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1238  OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1239  OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1240  StoreValueWide(rl_dest, rl_result);
1241  return true;
1242}
1243
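// Inlined Math.abs(float): clear the IEEE-754 sign bit by masking the raw bits with 0x7fffffff.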
1244bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
1245  if (cu_->instruction_set == kMips) {
1246    // TODO - add Mips implementation
1247    return false;
1248  }
1249  RegLocation rl_src = info->args[0];
1250  rl_src = LoadValue(rl_src, kCoreReg);
1251  RegLocation rl_dest = InlineTarget(info);
1252  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1253  OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
1254  StoreValue(rl_dest, rl_result);
1255  return true;
1256}
1257
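// Inlined Math.abs(double): copy the value and clear the sign bit in the high word of the pair.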
1258bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
1259  if (cu_->instruction_set == kMips) {
1260    // TODO - add Mips implementation
1261    return false;
1262  }
1263  RegLocation rl_src = info->args[0];
1264  rl_src = LoadValueWide(rl_src, kCoreReg);
1265  RegLocation rl_dest = InlineTargetWide(info);
1266  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1267  OpRegCopyWide(rl_result.reg, rl_src.reg);
1268  OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
1269  StoreValueWide(rl_dest, rl_result);
1270  return true;
1271}
1272
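// Float<->int (and, below, double<->long) bit-pattern conversions reduce to a plain move: the
// source value is simply stored to the result location unchanged.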
1273bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1274  if (cu_->instruction_set == kMips) {
1275    // TODO - add Mips implementation
1276    return false;
1277  }
1278  RegLocation rl_src = info->args[0];
1279  RegLocation rl_dest = InlineTarget(info);
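  // The conversions inlined here (e.g. Float.floatToRawIntBits / Float.intBitsToFloat)
  // only reinterpret bits, so the value is forwarded unchanged.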
1280  StoreValue(rl_dest, rl_src);
1281  return true;
1282}
1283
1284bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1285  if (cu_->instruction_set == kMips) {
1286    // TODO - add Mips implementation
1287    return false;
1288  }
1289  RegLocation rl_src = info->args[0];
1290  RegLocation rl_dest = InlineTargetWide(info);
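  // As in the float case, the 64-bit conversions are pure bit moves.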
1291  StoreValueWide(rl_dest, rl_src);
1292  return true;
1293}
1294
1295/*
1296 * Fast paths for String.indexOf(I) and indexOf(II).  Handles the simple case of a
1297 * search char <= 0xFFFF; otherwise bails out to the library implementation.
1298 */
1299bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1300  if (cu_->instruction_set == kMips) {
1301    // TODO - add Mips implementation
1302    return false;
1303  }
1304  RegLocation rl_obj = info->args[0];
1305  RegLocation rl_char = info->args[1];
1306  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1307    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1308    return false;
1309  }
1310
1311  ClobberCallerSave();
1312  LockCallTemps();  // Using fixed registers
1313  RegStorage reg_ptr = TargetReg(kArg0);
1314  RegStorage reg_char = TargetReg(kArg1);
1315  RegStorage reg_start = TargetReg(kArg2);
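  // The pIndexOf assembly stub expects (string, char, start index) in the first three
  // argument registers.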
1316
1317  LoadValueDirectFixed(rl_obj, reg_ptr);
1318  LoadValueDirectFixed(rl_char, reg_char);
1319  if (zero_based) {
1320    LoadConstant(reg_start, 0);
1321  } else {
1322    RegLocation rl_start = info->args[2];     // Start index (3rd arg), only in the (II) flavor.
1323    LoadValueDirectFixed(rl_start, reg_start);
1324  }
1325  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
1326  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1327  LIR* high_code_point_branch =
1328      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1329  // NOTE: not a safepoint
1330  OpReg(kOpBlx, r_tgt);
1331  if (!rl_char.is_const) {
1332    // Add the slow path for code points beyond 0xFFFF.
1333    DCHECK(high_code_point_branch != nullptr);
1334    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1335    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1336    AddIntrinsicLaunchpad(info, high_code_point_branch, resume_tgt);
1337  } else {
1338    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1339    DCHECK(high_code_point_branch == nullptr);
1340  }
1341  RegLocation rl_return = GetReturn(false);
1342  RegLocation rl_dest = InlineTarget(info);
1343  StoreValue(rl_dest, rl_return);
1344  return true;
1345}
1346
1347/* Fast String.compareTo(Ljava/lang/String;)I. */
1348bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1349  if (cu_->instruction_set == kMips) {
1350    // TODO - add Mips implementation
1351    return false;
1352  }
1353  ClobberCallerSave();
1354  LockCallTemps();  // Using fixed registers
1355  RegStorage reg_this = TargetReg(kArg0);
1356  RegStorage reg_cmp = TargetReg(kArg1);
1357
1358  RegLocation rl_this = info->args[0];
1359  RegLocation rl_cmp = info->args[1];
1360  LoadValueDirectFixed(rl_this, reg_this);
1361  LoadValueDirectFixed(rl_cmp, reg_cmp);
1362  RegStorage r_tgt = (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) ?
1363      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo)) : RegStorage::InvalidReg();
1364  GenExplicitNullCheck(reg_this, info->opt_flags);
1365  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1366  // TUNING: check if rl_cmp.s_reg_low is already null checked
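  // A null argument is not handled here: branch to a launchpad that re-invokes the real
  // String.compareTo(), which raises the NullPointerException.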
1367  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1368  AddIntrinsicLaunchpad(info, cmp_null_check_branch);
1369  // NOTE: not a safepoint
1370  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1371    OpReg(kOpBlx, r_tgt);
1372  } else {
1373    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1374  }
1375  RegLocation rl_return = GetReturn(false);
1376  RegLocation rl_dest = InlineTarget(info);
1377  StoreValue(rl_dest, rl_return);
1378  return true;
1379}
1380
1381bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1382  RegLocation rl_dest = InlineTarget(info);
1383  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1384  ThreadOffset<4> offset = Thread::PeerOffset<4>();
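  // Thread.currentThread() reduces to reading the managed peer pointer stored in the
  // native Thread: via the dedicated self register on ARM/MIPS, or via OpRegThreadMem
  // on x86.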
1385  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
1386    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
1387  } else {
1388    CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1389    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
1390  }
1391  StoreValue(rl_dest, rl_result);
1392  return true;
1393}
1394
1395bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1396                                  bool is_long, bool is_volatile) {
1397  if (cu_->instruction_set == kMips) {
1398    // TODO - add Mips implementation
1399    return false;
1400  }
1401  // Unused - RegLocation rl_src_unsafe = info->args[0];
1402  RegLocation rl_src_obj = info->args[1];  // Object
1403  RegLocation rl_src_offset = info->args[2];  // long low
1404  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1405  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1406
1407  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
1408  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1409  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1410  if (is_long) {
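    // x86 can address [object + offset] directly; other targets form the effective
    // address in a temp register first.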
1411    if (cu_->instruction_set == kX86) {
1412      LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg.GetLow(),
1413                          rl_result.reg.GetHigh(), kLong, INVALID_SREG);
1414    } else {
1415      RegStorage rl_temp_offset = AllocTemp();
1416      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1417      LoadBaseDispWide(rl_temp_offset, 0, rl_result.reg, INVALID_SREG);
1418      FreeTemp(rl_temp_offset.GetReg());
1419    }
1420  } else {
1421    LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, kWord);
1422  }
1423
1424  if (is_volatile) {
1425    // Without context-sensitive analysis, we must issue the most conservative barriers.
1426    // In this case, either a load or a store may follow, so we issue both barriers.
1427    GenMemBarrier(kLoadLoad);
1428    GenMemBarrier(kLoadStore);
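    // Issued after the load, LoadLoad + LoadStore together give it acquire semantics.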
1429  }
1430
1431  if (is_long) {
1432    StoreValueWide(rl_dest, rl_result);
1433  } else {
1434    StoreValue(rl_dest, rl_result);
1435  }
1436  return true;
1437}
1438
1439bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1440                                  bool is_object, bool is_volatile, bool is_ordered) {
1441  if (cu_->instruction_set == kMips) {
1442    // TODO - add Mips implementation
1443    return false;
1444  }
1445  // Unused - RegLocation rl_src_unsafe = info->args[0];
1446  RegLocation rl_src_obj = info->args[1];  // Object
1447  RegLocation rl_src_offset = info->args[2];  // long low
1448  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1449  RegLocation rl_src_value = info->args[4];  // value to store
1450  if (is_volatile || is_ordered) {
1451    // A store might precede this volatile/ordered one, so insert a StoreStore barrier.
1452    GenMemBarrier(kStoreStore);
1453  }
1454  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
1455  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1456  RegLocation rl_value;
1457  if (is_long) {
1458    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1459    if (cu_->instruction_set == kX86) {
1460      StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg.GetLow(),
1461                           rl_value.reg.GetHigh(), kLong, INVALID_SREG);
1462    } else {
1463      RegStorage rl_temp_offset = AllocTemp();
1464      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1465      StoreBaseDispWide(rl_temp_offset, 0, rl_value.reg);
1466      FreeTemp(rl_temp_offset.GetReg());
1467    }
1468  } else {
1469    rl_value = LoadValue(rl_src_value, kCoreReg);
1470    StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, kWord);
1471  }
1472
1473  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1474  FreeTemp(rl_offset.reg.GetReg());
1475
1476  if (is_volatile) {
1477    // A load might follow the volatile store, so insert a StoreLoad barrier.
1478    GenMemBarrier(kStoreLoad);
1479  }
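  // Note that ordered (putOrdered*) stores get only the leading StoreStore barrier; the
  // trailing StoreLoad is issued for volatile stores only.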
1480  if (is_object) {
1481    MarkGCCard(rl_value.reg, rl_object.reg);
1482  }
1483  return true;
1484}
1485
1486void Mir2Lir::GenInvoke(CallInfo* info) {
1487  if ((info->opt_flags & MIR_INLINED) != 0) {
1488    // Already inlined, but we may still need the null check.
1489    if (info->type != kStatic &&
1490        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
1491         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0))  {
1492      RegLocation rl_obj = LoadValue(info->args[0], kCoreReg);
1493      GenImmedCheck(kCondEq, rl_obj.reg, 0, kThrowNullPointer);
1494    }
1495    return;
1496  }
1497  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1498  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1499      ->GenIntrinsic(this, info)) {
1500    return;
1501  }
1502  GenInvokeNoInline(info);
1503}
1504
1505void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1506  int call_state = 0;
1507  LIR* null_ck;
1508  LIR** p_null_ck = nullptr;
1509  NextCallInsn next_call_insn;
1510  FlushAllRegs();  /* Everything to home location */
1511  // Explicit register usage
1512  LockCallTemps();
1513
1514  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1515  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1516  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1517  info->type = static_cast<InvokeType>(method_info.GetSharpType());
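  // The driver may have sharpened the call (e.g. virtual -> direct); the original type
  // is kept and passed through to the call-sequence helpers below.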
1518  bool fast_path = method_info.FastPath();
1519  bool skip_this;
1520  if (info->type == kInterface) {
1521    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1522    skip_this = fast_path;
1523  } else if (info->type == kDirect) {
1524    if (fast_path) {
1525      p_null_ck = &null_ck;
1526    }
1527    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1528    skip_this = false;
1529  } else if (info->type == kStatic) {
1530    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1531    skip_this = false;
1532  } else if (info->type == kSuper) {
1533    DCHECK(!fast_path);  // Fast path is a direct call.
1534    next_call_insn = NextSuperCallInsnSP;
1535    skip_this = false;
1536  } else {
1537    DCHECK_EQ(info->type, kVirtual);
1538    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1539    skip_this = fast_path;
1540  }
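  // When skip_this is set, the chosen call sequence loads the 'this' reference itself,
  // so the argument-loading helpers below can skip it.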
1541  MethodReference target_method = method_info.GetTargetMethod();
1542  if (!info->is_range) {
1543    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1544                                      next_call_insn, target_method, method_info.VTableIndex(),
1545                                      method_info.DirectCode(), method_info.DirectMethod(),
1546                                      original_type, skip_this);
1547  } else {
1548    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1549                                    next_call_insn, target_method, method_info.VTableIndex(),
1550                                    method_info.DirectCode(), method_info.DirectMethod(),
1551                                    original_type, skip_this);
1552  }
1553  // Finish up any part of the call sequence not interleaved with argument loading.
1554  while (call_state >= 0) {
1555    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1556                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
1557  }
1558  LIR* call_inst;
1559  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1560    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
1561  } else {
1562    if (fast_path) {
1563      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
1564        // We can have the linker fix up a relative call.
1565        call_inst =
1566          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
1567      } else {
1568        call_inst = OpMem(kOpBlx, TargetReg(kArg0),
1569                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
1570      }
1571    } else {
1572      ThreadOffset<4> trampoline(-1);
1573      switch (info->type) {
1574      case kInterface:
1575        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck);
1576        break;
1577      case kDirect:
1578        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
1579        break;
1580      case kStatic:
1581        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
1582        break;
1583      case kSuper:
1584        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
1585        break;
1586      case kVirtual:
1587        trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck);
1588        break;
1589      default:
1590        LOG(FATAL) << "Unexpected invoke type";
1591      }
1592      call_inst = OpThreadMem(kOpBlx, trampoline);
1593    }
1594  }
1595  MarkSafepointPC(call_inst);
1596
1597  ClobberCallerSave();
1598  if (info->result.location != kLocInvalid) {
1599    // We have a following MOVE_RESULT - do it now.
1600    if (info->result.wide) {
1601      RegLocation ret_loc = GetReturnWide(info->result.fp);
1602      StoreValueWide(info->result, ret_loc);
1603    } else {
1604      RegLocation ret_loc = GetReturn(info->result.fp);
1605      StoreValue(info->result, ret_loc);
1606    }
1607  }
1608}
1609
1610}  // namespace art
1611