gen_invoke.cc revision 87b7c52ac660119b8dea46967974b76c86d0750b
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mir_to_lir-inl.h"
18
19#include "arm/codegen_arm.h"
20#include "dex/compiler_ir.h"
21#include "dex/dex_flags.h"
22#include "dex/mir_graph.h"
23#include "dex/quick/dex_file_method_inliner.h"
24#include "dex/quick/dex_file_to_method_inliner_map.h"
25#include "dex_file-inl.h"
26#include "driver/compiler_driver.h"
27#include "driver/compiler_options.h"
28#include "entrypoints/quick/quick_entrypoints.h"
29#include "invoke_type.h"
30#include "mirror/array.h"
31#include "mirror/class-inl.h"
32#include "mirror/dex_cache.h"
33#include "mirror/object_array-inl.h"
34#include "mirror/string.h"
35#include "scoped_thread_state_change.h"
36
37namespace art {
38
39// Shortcuts to repeatedly used long types.
40typedef mirror::ObjectArray<mirror::Object> ObjArray;
41
42/*
43 * This source file contains "gen" codegen routines that should
44 * be applicable to most targets.  Only mid-level support utilities
45 * and "op" calls may be used here.
46 */
47
48void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
49  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
50   public:
51    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
52        : LIRSlowPath(m2l, branch_in, resume_in), info_(info_in) {
53      DCHECK_EQ(info_in->offset, current_dex_pc_);
54    }
55
56    void Compile() {
57      m2l_->ResetRegPool();
58      m2l_->ResetDefTracking();
59      GenerateTargetLabel(kPseudoIntrinsicRetry);
60      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
61      m2l_->GenInvokeNoInline(info_);
62      if (cont_ != nullptr) {
63        m2l_->OpUnconditionalBranch(cont_);
64      }
65    }
66
67   private:
68    CallInfo* const info_;
69  };
70
71  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
72}
73
74/*
75 * To save scheduling time, helper calls are broken into two parts: generation of
76 * the helper target address, and the actual call to the helper.  Because x86
77 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
78 * load arguments between the two parts.
79 */
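// Illustrative sketch of the pattern: a typical non-x86 helper call looks like
//   RegStorage r_tgt = CallHelperSetup(trampoline);   // part 1: materialize the target address
//   LoadConstant(TargetReg(kArg0, kNotWide), arg0);   // load arguments in between
//   ClobberCallerSave();
//   CallHelper(r_tgt, trampoline, safepoint_pc);      // part 2: the actual call
// On x86/x86-64, CallHelperSetup() returns an invalid register and the call goes through memory.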
80// template <size_t pointer_size>
81RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
82  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
83    return RegStorage::InvalidReg();
84  } else {
85    return LoadHelper(trampoline);
86  }
87}
88
89LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
90                         bool use_link) {
91  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);
92
93  if (r_tgt.Valid()) {
94    FreeTemp(r_tgt);
95  }
96
97  if (safepoint_pc) {
98    MarkSafepointPC(call_inst);
99  }
100  return call_inst;
101}
102
103void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
104  RegStorage r_tgt = CallHelperSetup(trampoline);
105  ClobberCallerSave();
106  CallHelper(r_tgt, trampoline, safepoint_pc);
107}
108
109void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
110  RegStorage r_tgt = CallHelperSetup(trampoline);
111  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
112  ClobberCallerSave();
113  CallHelper(r_tgt, trampoline, safepoint_pc);
114}
115
116void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
117                                   bool safepoint_pc) {
118  RegStorage r_tgt = CallHelperSetup(trampoline);
119  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
120  ClobberCallerSave();
121  CallHelper(r_tgt, trampoline, safepoint_pc);
122}
123
124void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
125                                           bool safepoint_pc) {
126  RegStorage r_tgt = CallHelperSetup(trampoline);
127  if (arg0.wide == 0) {
128    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
129  } else {
130    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
131  }
132  ClobberCallerSave();
133  CallHelper(r_tgt, trampoline, safepoint_pc);
134}
135
136void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
137                                      bool safepoint_pc) {
138  RegStorage r_tgt = CallHelperSetup(trampoline);
139  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
140  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
141  ClobberCallerSave();
142  CallHelper(r_tgt, trampoline, safepoint_pc);
143}
144
145void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
146                                              RegLocation arg1, bool safepoint_pc) {
147  RegStorage r_tgt = CallHelperSetup(trampoline);
148  if (arg1.wide == 0) {
149    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
150  } else {
151    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
152    LoadValueDirectWideFixed(arg1, r_tmp);
153  }
154  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
155  ClobberCallerSave();
156  CallHelper(r_tgt, trampoline, safepoint_pc);
157}
158
159void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
160                                              int arg1, bool safepoint_pc) {
161  RegStorage r_tgt = CallHelperSetup(trampoline);
162  DCHECK(!arg0.wide);
163  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
164  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
165  ClobberCallerSave();
166  CallHelper(r_tgt, trampoline, safepoint_pc);
167}
168
169void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
170                                      bool safepoint_pc) {
171  RegStorage r_tgt = CallHelperSetup(trampoline);
172  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
173  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
174  ClobberCallerSave();
175  CallHelper(r_tgt, trampoline, safepoint_pc);
176}
177
178void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
179                                      bool safepoint_pc) {
180  RegStorage r_tgt = CallHelperSetup(trampoline);
181  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
182  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
183  ClobberCallerSave();
184  CallHelper(r_tgt, trampoline, safepoint_pc);
185}
186
187void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
188                                         bool safepoint_pc) {
189  RegStorage r_tgt = CallHelperSetup(trampoline);
190  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
191  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
192  ClobberCallerSave();
193  CallHelper(r_tgt, trampoline, safepoint_pc);
194}
195
196void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
197                                         bool safepoint_pc) {
198  RegStorage r_tgt = CallHelperSetup(trampoline);
199  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
200  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
201  if (r_tmp.NotExactlyEquals(arg0)) {
202    OpRegCopy(r_tmp, arg0);
203  }
204  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
205  ClobberCallerSave();
206  CallHelper(r_tgt, trampoline, safepoint_pc);
207}
208
209void Mir2Lir::CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
210                                                    RegLocation arg1, bool safepoint_pc) {
211  RegStorage r_tgt = CallHelperSetup(trampoline);
212  DCHECK(!IsSameReg(TargetReg(kArg2, arg0.GetWideKind()), arg0));
213  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
214  if (r_tmp.NotExactlyEquals(arg0)) {
215    OpRegCopy(r_tmp, arg0);
216  }
217  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
218  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
219  ClobberCallerSave();
220  CallHelper(r_tgt, trampoline, safepoint_pc);
221}
222
223void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
224                                                      RegLocation arg0, RegLocation arg1,
225                                                      bool safepoint_pc) {
226  RegStorage r_tgt = CallHelperSetup(trampoline);
227  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
228      cu_->instruction_set == kX86_64) {
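    // On these 64-bit targets core and FP arguments use separate register sequences: arg1 only
    // advances to the second register (kArg1/kFArg1) when it has the same FP-ness as arg0;
    // otherwise it takes the first register (kArg0/kFArg0) of the other sequence.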
229    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
230
231    RegStorage arg1_reg;
232    if (arg1.fp == arg0.fp) {
233      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
234    } else {
235      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
236    }
237
238    if (arg0.wide == 0) {
239      LoadValueDirectFixed(arg0, arg0_reg);
240    } else {
241      LoadValueDirectWideFixed(arg0, arg0_reg);
242    }
243
244    if (arg1.wide == 0) {
245      LoadValueDirectFixed(arg1, arg1_reg);
246    } else {
247      LoadValueDirectWideFixed(arg1, arg1_reg);
248    }
249  } else {
250    DCHECK(!cu_->target64);
251    if (arg0.wide == 0) {
252      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
253      if (arg1.wide == 0) {
254        // For Mips, when the 1st arg is integral, the remaining args are passed in core registers.
255        if (cu_->instruction_set == kMips) {
256          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
257        } else {
258          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
259        }
260      } else {
261        // For Mips, when the 1st arg is integral, the remaining args are passed in core registers.
262        if (cu_->instruction_set == kMips) {
263          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
264        } else {
265          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
266        }
267      }
268    } else {
269      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
270      if (arg1.wide == 0) {
271        // For Mips, when the 1st arg is integral, the remaining args are passed in core registers.
272        if (cu_->instruction_set == kMips) {
273          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
274        } else {
275          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
276        }
277      } else {
278        // For Mips, when the 1st arg is integral, the remaining args are passed in core registers.
279        if (cu_->instruction_set == kMips) {
280          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
281        } else {
282          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
283        }
284      }
285    }
286  }
287  ClobberCallerSave();
288  CallHelper(r_tgt, trampoline, safepoint_pc);
289}
290
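// Worked example of the swap below: if arg1 currently lives in kArg0 and arg0 lives in kArg1,
// copying either operand directly would clobber the other, so kArg2 is used as a temp:
//   kArg2 := arg1 (old kArg0);  kArg0 := arg0 (old kArg1);  kArg1 := kArg2.
// If only arg1 aliases kArg0, it is moved to kArg1 first; otherwise plain copies suffice.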
291void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
292  WideKind arg0_kind = arg0.GetWideKind();
293  WideKind arg1_kind = arg1.GetWideKind();
294  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
295    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
296      // Swap kArg0 and kArg1 with kArg2 as temp.
297      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
298      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
299      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
300    } else {
301      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
302      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
303    }
304  } else {
305    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
306    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
307  }
308}
309
310void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
311                                      RegStorage arg1, bool safepoint_pc) {
312  RegStorage r_tgt = CallHelperSetup(trampoline);
313  CopyToArgumentRegs(arg0, arg1);
314  ClobberCallerSave();
315  CallHelper(r_tgt, trampoline, safepoint_pc);
316}
317
318void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
319                                         RegStorage arg1, int arg2, bool safepoint_pc) {
320  RegStorage r_tgt = CallHelperSetup(trampoline);
321  CopyToArgumentRegs(arg0, arg1);
322  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
323  ClobberCallerSave();
324  CallHelper(r_tgt, trampoline, safepoint_pc);
325}
326
327void Mir2Lir::CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
328                                                    RegLocation arg1, bool safepoint_pc) {
329  RegStorage r_tgt = CallHelperSetup(trampoline);
330  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
331  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
332  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
333  ClobberCallerSave();
334  CallHelper(r_tgt, trampoline, safepoint_pc);
335}
336
337void Mir2Lir::CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
338                                            bool safepoint_pc) {
339  RegStorage r_tgt = CallHelperSetup(trampoline);
340  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
341  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
342  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
343  ClobberCallerSave();
344  CallHelper(r_tgt, trampoline, safepoint_pc);
345}
346
347void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
348                                                         RegLocation arg1,
349                                                         RegLocation arg2, bool safepoint_pc) {
350  RegStorage r_tgt = CallHelperSetup(trampoline);
351  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
352                                                        // instantiation bug in GCC.
353  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
354  if (arg2.wide == 0) {
355    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
356  } else {
357    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
358  }
359  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
360  ClobberCallerSave();
361  CallHelper(r_tgt, trampoline, safepoint_pc);
362}
363
364void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
365    QuickEntrypointEnum trampoline,
366    RegLocation arg0,
367    RegLocation arg1,
368    RegLocation arg2,
369    bool safepoint_pc) {
370  RegStorage r_tgt = CallHelperSetup(trampoline);
371  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
372  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
373  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
374  ClobberCallerSave();
375  CallHelper(r_tgt, trampoline, safepoint_pc);
376}
377
378/*
379 * If there are any ins passed in registers that have not been promoted
380 * to a callee-save register, flush them to the frame.  Perform initial
381 * assignment of promoted arguments.
382 *
383 * ArgLocs is an array of location records describing the incoming arguments
384 * with one location record per word of argument.
385 */
386// TODO: Support 64-bit argument registers.
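// Rough summary of the cases handled below:
//  - arg arrives in a register and the VR is promoted: copy it to the promoted register;
//  - arg arrives in a register but is not promoted: flush it to its frame slot;
//  - arg arrives in the frame and the VR is promoted: load it into the promoted register;
//  - arg arrives in the frame and is not promoted: nothing to do, it is already home.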
387void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
388  /*
389   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
390   * It will attempt to keep kArg0 live (or copy it to home location
391   * if promoted).
392   */
393  RegLocation rl_src = rl_method;
394  rl_src.location = kLocPhysReg;
395  rl_src.reg = TargetReg(kArg0, kRef);
396  rl_src.home = false;
397  MarkLive(rl_src);
398  StoreValue(rl_method, rl_src);
399  // If Method* has been promoted, explicitly flush it.
400  if (rl_method.location == kLocPhysReg) {
401    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
402  }
403
404  if (mir_graph_->GetNumOfInVRs() == 0) {
405    return;
406  }
407
408  int start_vreg = mir_graph_->GetFirstInVR();
409  /*
410   * Copy incoming arguments to their proper home locations.
411   * NOTE: an older version of dx had an issue in which
412   * it would reuse static method argument registers.
413   * This could result in the same Dalvik virtual register
414   * being promoted to both core and fp regs. To account for this,
415   * we only copy to the corresponding promoted physical register
416   * if it matches the type of the SSA name for the incoming
417   * argument.  It is also possible that long and double arguments
418   * end up half-promoted.  In those cases, we must flush the promoted
419   * half to memory as well.
420   */
421  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
422  RegLocation* t_loc = nullptr;
423  EnsureInitializedArgMappingToPhysicalReg();
424  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
425    // get reg corresponding to input
426    RegStorage reg = in_to_reg_storage_mapping_.GetReg(i);
427    t_loc = &ArgLocs[i];
428
429    // If a wide input was mapped to only a single (32-bit) register, ignore the register
430    // and treat the value as coming from memory.
431    if (t_loc->wide && reg.Valid() && !reg.Is64Bit()) {
432      // The memory already holds the half. Don't do anything.
433      reg = RegStorage::InvalidReg();
434    }
435
436    if (reg.Valid()) {
437      // If arriving in register.
438
439      // We have already updated the arg location with promoted info
440      // so we can rely on it.
441      if (t_loc->location == kLocPhysReg) {
442        // Just copy it.
443        if (t_loc->wide) {
444          OpRegCopyWide(t_loc->reg, reg);
445        } else {
446          OpRegCopy(t_loc->reg, reg);
447        }
448      } else {
449        // Needs flush.
450        int offset = SRegOffset(start_vreg + i);
451        if (t_loc->ref) {
452          StoreRefDisp(TargetPtrReg(kSp), offset, reg, kNotVolatile);
453        } else {
454          StoreBaseDisp(TargetPtrReg(kSp), offset, reg, t_loc->wide ? k64 : k32, kNotVolatile);
455        }
456      }
457    } else {
458      // If arriving in frame & promoted.
459      if (t_loc->location == kLocPhysReg) {
460        int offset = SRegOffset(start_vreg + i);
461        if (t_loc->ref) {
462          LoadRefDisp(TargetPtrReg(kSp), offset, t_loc->reg, kNotVolatile);
463        } else {
464          LoadBaseDisp(TargetPtrReg(kSp), offset, t_loc->reg, t_loc->wide ? k64 : k32,
465                       kNotVolatile);
466        }
467      }
468    }
469  }
470}
471
472static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
473  RegLocation rl_arg = info->args[0];
474  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
475}
476
477static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
478  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
479  // get this->klass_ [use kArg1, set kArg0]
480  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
481                  cg->TargetReg(kArg0, kRef),
482                  kNotVolatile);
483  cg->MarkPossibleNullPointerException(info->opt_flags);
484}
485
486static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
487                                                       const CompilationUnit* cu, Mir2Lir* cg) {
488  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
489    int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
490        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
491    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
492    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
493                     cg->TargetPtrReg(kInvokeTgt));
494    return true;
495  }
496  return false;
497}
498
499/*
500 * Bit of a hack here - in the absence of a real scheduling pass,
501 * emit the next instruction in a virtual invoke sequence.
502 * We can use kLr as a temp prior to target address loading.
503 * Note also that we'll load the first argument ("this") into
504 * kArg1 here rather than the standard GenDalvikArgs.
505 */
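/*
 * Convention followed by the Next*CallInsn helpers below: each call emits the code for the
 * given 'state' and returns the next state, or -1 once the sequence is complete.
 * GenDalvikArgs() drives these callbacks, interleaving them with argument set-up so that
 * loads can be scheduled alongside the invoke sequence.
 */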
506static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
507                         int state, const MethodReference& target_method,
508                         uint32_t method_idx, uintptr_t, uintptr_t,
509                         InvokeType) {
510  UNUSED(target_method);
511  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
512  /*
513   * This is the fast path in which the target virtual method is
514   * fully resolved at compile time.
515   */
516  switch (state) {
517    case 0:
518      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
519      break;
520    case 1:
521      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
522                                                  // Includes a null-check.
523      break;
524    case 2: {
525      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
526      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
527          method_idx * sizeof(mirror::Class::VTableEntry);
528      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
529      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
530      break;
531    }
532    case 3:
533      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
534        break;                                    // kInvokeTgt := kArg0->entrypoint
535      }
536      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
537      FALLTHROUGH_INTENDED;
538    default:
539      return -1;
540  }
541  return state + 1;
542}
543
544/*
545 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
546 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
547 * more than one interface method maps to the same index. Note also that we'll load the first
548 * argument ("this") into kArg1 here rather than the standard GenDalvikArgs.
549 */
550static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
551                                 const MethodReference& target_method,
552                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
553  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
554
555  switch (state) {
556    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
557      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
558      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
559      if (cu->instruction_set == kX86) {
560        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
561      }
562      break;
563    case 1:
564      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
565      break;
566    case 2:
567      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
568                                                  // Includes a null-check.
569      break;
570    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
571      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
572          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
573      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
574      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
575      break;
576    }
577    case 4:
578      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
579        break;                                    // kInvokeTgt := kArg0->entrypoint
580      }
581      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
582      FALLTHROUGH_INTENDED;
583    default:
584      return -1;
585  }
586  return state + 1;
587}
588
589static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
590                            QuickEntrypointEnum trampoline, int state,
591                            const MethodReference& target_method, uint32_t method_idx) {
592  UNUSED(info, method_idx);
593  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
594
595  /*
596   * This handles the case in which the base method is not fully
597   * resolved at compile time, so we bail to a runtime helper.
598   */
599  if (state == 0) {
600    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
601      // Load trampoline target
602      int32_t disp;
603      if (cu->target64) {
604        disp = GetThreadOffset<8>(trampoline).Int32Value();
605      } else {
606        disp = GetThreadOffset<4>(trampoline).Int32Value();
607      }
608      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
609    }
610    // Load kArg0 with method index
611    CHECK_EQ(cu->dex_file, target_method.dex_file);
612    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
613    return 1;
614  }
615  return -1;
616}
617
618static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
619                                int state,
620                                const MethodReference& target_method,
621                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
622  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
623                          target_method, 0);
624}
625
626static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
627                                const MethodReference& target_method,
628                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
629  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
630                          target_method, 0);
631}
632
633static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
634                               const MethodReference& target_method,
635                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
636  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
637                          target_method, 0);
638}
639
640static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
641                           const MethodReference& target_method,
642                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
643  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
644                          target_method, 0);
645}
646
647static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
648                                                CallInfo* info, int state,
649                                                const MethodReference& target_method,
650                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
651  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
652                          target_method, 0);
653}
654
655// Default implementation of implicit null pointer check.
656// Overridden by arch specific as necessary.
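// The check below simply loads a word from [reg + #0]; if reg is null the load faults, and the
// fault handler (when implicit null checks are enabled) is expected to turn the fault into a
// NullPointerException at the PC recorded by MarkPossibleNullPointerException().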
657void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
658  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
659    return;
660  }
661  RegStorage tmp = AllocTemp();
662  Load32Disp(reg, 0, tmp);
663  MarkPossibleNullPointerException(opt_flags);
664  FreeTemp(tmp);
665}
666
667/**
668 * @brief Used to flush promoted registers if they are used as arguments
669 * in an invocation.
670 * @param info the information about the arguments of the invocation.
671 * @param start the first argument to start looking from.
672 */
673void Mir2Lir::GenDalvikArgsFlushPromoted(CallInfo* info, int start) {
674  if (cu_->disable_opt & (1 << kPromoteRegs)) {
675    // This makes sense only if promotion is enabled.
676    return;
677  }
678  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
679  // Scan the rest of the args; if one is in a physical register, flush it to memory.
680  for (size_t next_arg = start; next_arg < info->num_arg_words;) {
681    RegLocation loc = info->args[next_arg];
682    if (loc.wide) {
683      loc = UpdateLocWide(loc);
684      if (loc.location == kLocPhysReg) {
685        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
686      }
687      next_arg += 2;
688    } else {
689      loc = UpdateLoc(loc);
690      if (loc.location == kLocPhysReg) {
691        if (loc.ref) {
692          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
693        } else {
694          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
695                        kNotVolatile);
696        }
697      }
698      next_arg++;
699    }
700  }
701}
702
703/**
704 * @brief Used to optimize the copying of VRs that are arguments of an invocation.
705 * Note that promoted registers should be flushed first if a copy is performed.
706 * If the implementation does the copying, it may skip several of the first VRs but must copy
707 * through to the end. The implementation must return the number of skipped VRs
708 * (which might be all of them).
709 * @see GenDalvikArgsFlushPromoted
710 * @param info the information about the arguments of the invocation.
711 * @param first the first argument to start looking from.
712 * @param count the number of remaining arguments we can handle.
713 * @return the number of arguments that were not handled. Unhandled arguments
714 * must be adjacent to the first one.
715 */
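// Illustration of the contract: when count > 16 the implementation below flushes promoted args
// and emits a single kQuickMemcpy runtime call that copies 'count' VR words from their frame
// slots to the outs area, returning 0 (everything handled); otherwise it returns 'count'
// unchanged and the caller stores the remaining VRs one by one.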
716int Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
717  // The runtime memcpy call is fairly expensive, so use it only if the count is big.
718  if (count > 16) {
719    GenDalvikArgsFlushPromoted(info, first);
720    int start_offset = SRegOffset(info->args[first].s_reg_low);
721    int outs_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);
722
723    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
724    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
725    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
726                               count * 4, false);
727    count = 0;
728  }
729  return count;
730}
731
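// Rough outline of GenDalvikArgs(): (1) build the in-to-reg mapping from the callee's shorty,
// (2) store the VRs that are not mapped to registers into the outs area, either with a bulk
// copy for range invokes or one by one, (3) load the register-mapped VRs, interleaving
// next_call_insn() so the invoke sequence can make progress, and (4) finish with the null
// check of "this" if requested via pcrLabel.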
732int Mir2Lir::GenDalvikArgs(CallInfo* info, int call_state,
733                           LIR** pcrLabel, NextCallInsn next_call_insn,
734                           const MethodReference& target_method,
735                           uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
736                           InvokeType type, bool skip_this) {
737  // If no arguments, just return.
738  if (info->num_arg_words == 0u)
739    return call_state;
740
741  const size_t start_index = skip_this ? 1 : 0;
742
743  // Get the architecture-dependent mapping between output VRs and physical registers,
744  // based on the shorty of the method to call.
745  InToRegStorageMapping in_to_reg_storage_mapping(arena_);
746  {
747    const char* target_shorty = mir_graph_->GetShortyFromMethodReference(target_method);
748    ShortyIterator shorty_iterator(target_shorty, type == kStatic);
749    in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
750  }
751
752  size_t stack_map_start = std::max(in_to_reg_storage_mapping.GetEndMappedIn(), start_index);
753  if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
754    // It is possible that the last mapped reg is 32-bit while the arg is 64-bit.
755    // In that case the high word is handled together with the low part mapped to the register.
756    stack_map_start++;
757  }
758  size_t regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;
759
760  // If it is a range case, we can try to copy the remaining VRs (those not mapped to physical
761  // registers) using a more optimal algorithm.
762  if (info->is_range && regs_left_to_pass_via_stack > 1) {
763    regs_left_to_pass_via_stack = GenDalvikArgsBulkCopy(info, stack_map_start,
764                                                        regs_left_to_pass_via_stack);
765  }
766
767  // Now handle any remaining VRs mapped to stack.
768  if (in_to_reg_storage_mapping.HasArgumentsOnStack()) {
769    // Use two temps, but not kArg1; it may already hold "this", which we can skip.
770    // Keep single and wide temps separate - it can give some advantage.
771    RegStorage regRef = TargetReg(kArg3, kRef);
772    RegStorage regSingle = TargetReg(kArg3, kNotWide);
773    RegStorage regWide = TargetReg(kArg2, kWide);
774    for (size_t i = start_index; i < stack_map_start + regs_left_to_pass_via_stack; i++) {
775      RegLocation rl_arg = info->args[i];
776      rl_arg = UpdateRawLoc(rl_arg);
777      RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
778      if (!reg.Valid()) {
779        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
780        {
781          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
782          if (rl_arg.wide) {
783            if (rl_arg.location == kLocPhysReg) {
784              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
785            } else {
786              LoadValueDirectWideFixed(rl_arg, regWide);
787              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
788            }
789          } else {
790            if (rl_arg.location == kLocPhysReg) {
791              if (rl_arg.ref) {
792                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
793              } else {
794                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
795              }
796            } else {
797              if (rl_arg.ref) {
798                LoadValueDirectFixed(rl_arg, regRef);
799                StoreRefDisp(TargetPtrReg(kSp), out_offset, regRef, kNotVolatile);
800              } else {
801                LoadValueDirectFixed(rl_arg, regSingle);
802                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
803              }
804            }
805          }
806        }
807        call_state = next_call_insn(cu_, info, call_state, target_method,
808                                    vtable_idx, direct_code, direct_method, type);
809      }
810      if (rl_arg.wide) {
811        i++;
812      }
813    }
814  }
815
816  // Finish with VRs mapped to physical registers.
817  for (size_t i = start_index; i < stack_map_start; i++) {
818    RegLocation rl_arg = info->args[i];
819    rl_arg = UpdateRawLoc(rl_arg);
820    RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
821    if (reg.Valid()) {
822      if (rl_arg.wide) {
823        // If reg is not 64-bit (i.e. it holds only half of the 64-bit value), handle it separately.
824        if (!reg.Is64Bit()) {
825          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
826          if (rl_arg.location == kLocPhysReg) {
827            int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
828            // Dump it to memory.
829            StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
830            LoadBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
831          } else {
832            int high_offset = StackVisitor::GetOutVROffset(i + 1, cu_->instruction_set);
833            // First, use target reg for high part.
834            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + 1), reg, k32,
835                         kNotVolatile);
836            StoreBaseDisp(TargetPtrReg(kSp), high_offset, reg, k32, kNotVolatile);
837            // Now, use target reg for low part.
838            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low), reg, k32, kNotVolatile);
839            int low_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
840            // And store it to the expected memory location.
841            StoreBaseDisp(TargetPtrReg(kSp), low_offset, reg, k32, kNotVolatile);
842          }
843        } else {
844          LoadValueDirectWideFixed(rl_arg, reg);
845        }
846      } else {
847        LoadValueDirectFixed(rl_arg, reg);
848      }
849      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
850                               direct_code, direct_method, type);
851    }
852    if (rl_arg.wide) {
853      i++;
854    }
855  }
856
857  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
858                           direct_code, direct_method, type);
859  if (pcrLabel) {
860    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
861      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
862    } else {
863      *pcrLabel = nullptr;
864      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
865    }
866  }
867  return call_state;
868}
869
870void Mir2Lir::EnsureInitializedArgMappingToPhysicalReg() {
871  if (!in_to_reg_storage_mapping_.IsInitialized()) {
872    ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
873    in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
874  }
875}
876
877RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
878  RegLocation res;
879  if (info->result.location == kLocInvalid) {
880    // If result is unused, return a sink target based on type of invoke target.
881    res = GetReturn(
882        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
883  } else {
884    res = info->result;
885    DCHECK_EQ(LocToRegClass(res),
886              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
887  }
888  return res;
889}
890
891RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
892  RegLocation res;
893  if (info->result.location == kLocInvalid) {
894    // If result is unused, return a sink target based on type of invoke target.
895    res = GetReturnWide(ShortyToRegClass(
896        mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
897  } else {
898    res = info->result;
899    DCHECK_EQ(LocToRegClass(res),
900              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
901  }
902  return res;
903}
904
905bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
906  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
907    // TODO: add Mips and Mips64 implementations.
908    return false;
909  }
910
911  bool use_direct_type_ptr;
912  uintptr_t direct_type_ptr;
913  ClassReference ref;
914  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
915        &use_direct_type_ptr, &direct_type_ptr)) {
916    return false;
917  }
918
919  RegStorage reg_class = TargetReg(kArg1, kRef);
920  Clobber(reg_class);
921  LockTemp(reg_class);
922  if (use_direct_type_ptr) {
923    LoadConstant(reg_class, direct_type_ptr);
924  } else {
925    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
926    LoadClassType(*ref.first, type_idx, kArg1);
927  }
928
929  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
930  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
931  CHECK(slow_path_flag_offset && disable_flag_offset &&
932        (slow_path_flag_offset != disable_flag_offset));
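  // The intrinsic bails to the non-intrinsic call path when either the Reference slow-path flag
  // or the disable flag is set; ORing the two flag bytes below lets a single branch test both.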
933
934  // intrinsic logic start.
935  RegLocation rl_obj = info->args[0];
936  rl_obj = LoadValue(rl_obj, kRefReg);
937
938  RegStorage reg_slow_path = AllocTemp();
939  RegStorage reg_disabled = AllocTemp();
940  LoadBaseDisp(reg_class, slow_path_flag_offset, reg_slow_path, kSignedByte, kNotVolatile);
941  LoadBaseDisp(reg_class, disable_flag_offset, reg_disabled, kSignedByte, kNotVolatile);
942  FreeTemp(reg_class);
943  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
944  FreeTemp(reg_disabled);
945
946  // if slow path, jump to JNI path target
947  LIR* slow_path_branch;
948  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
949    // Generate only a conditional branch, as the OR sets the condition codes (we are interested in the 'Z' flag).
950    slow_path_branch = OpCondBranch(kCondNe, nullptr);
951  } else {
952    // Generate compare and branch.
953    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
954  }
955  FreeTemp(reg_slow_path);
956
957  // Slow path not taken; simply load the referent of the reference object.
958  RegLocation rl_dest = InlineTarget(info);
959  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
960  GenNullCheck(rl_obj.reg, info->opt_flags);
961  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
962      kNotVolatile);
963  MarkPossibleNullPointerException(info->opt_flags);
964  StoreValue(rl_dest, rl_result);
965
966  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
967  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
968  ClobberCallerSave();  // We must clobber everything because slow path will return here
969  return true;
970}
971
972bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
973  // Location of reference to data array
974  int value_offset = mirror::String::ValueOffset().Int32Value();
975  // Location of count
976  int count_offset = mirror::String::CountOffset().Int32Value();
977  // Starting offset within data array
978  int offset_offset = mirror::String::OffsetOffset().Int32Value();
979  // Start of char data within array_
980  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
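  // With this String layout (a char[] reference value_ plus offset_ and count_ fields), the char
  // at index idx lives at data_offset + 2 * (offset_ + idx) within the value_ array, which is
  // exactly what the reg_ptr/reg_off computation below forms.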
981
982  RegLocation rl_obj = info->args[0];
983  RegLocation rl_idx = info->args[1];
984  rl_obj = LoadValue(rl_obj, kRefReg);
985  rl_idx = LoadValue(rl_idx, kCoreReg);
986  RegStorage reg_max;
987  GenNullCheck(rl_obj.reg, info->opt_flags);
988  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
989  LIR* range_check_branch = nullptr;
990  RegStorage reg_off;
991  RegStorage reg_ptr;
992  reg_off = AllocTemp();
993  reg_ptr = AllocTempRef();
994  if (range_check) {
995    reg_max = AllocTemp();
996    Load32Disp(rl_obj.reg, count_offset, reg_max);
997    MarkPossibleNullPointerException(info->opt_flags);
998  }
999  Load32Disp(rl_obj.reg, offset_offset, reg_off);
1000  MarkPossibleNullPointerException(info->opt_flags);
1001  LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1002  if (range_check) {
1003    // Set up a slow path to allow retry in case of bounds violation.
1004    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1005    FreeTemp(reg_max);
1006    range_check_branch = OpCondBranch(kCondUge, nullptr);
1007  }
1008  OpRegImm(kOpAdd, reg_ptr, data_offset);
1009  if (rl_idx.is_const) {
1010    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1011  } else {
1012    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1013  }
1014  FreeTemp(rl_obj.reg);
1015  if (rl_idx.location == kLocPhysReg) {
1016    FreeTemp(rl_idx.reg);
1017  }
1018  RegLocation rl_dest = InlineTarget(info);
1019  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1020  LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1021  FreeTemp(reg_off);
1022  FreeTemp(reg_ptr);
1023  StoreValue(rl_dest, rl_result);
1024  if (range_check) {
1025    DCHECK(range_check_branch != nullptr);
1026    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1027    AddIntrinsicSlowPath(info, range_check_branch);
1028  }
1029  return true;
1030}
1031
1032// Generates an inlined String.isEmpty() or String.length().
1033bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1034  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1035    // TODO: add Mips and Mips64 implementations.
1036    return false;
1037  }
1038  // dst = src.length();
1039  RegLocation rl_obj = info->args[0];
1040  rl_obj = LoadValue(rl_obj, kRefReg);
1041  RegLocation rl_dest = InlineTarget(info);
1042  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1043  GenNullCheck(rl_obj.reg, info->opt_flags);
1044  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1045  MarkPossibleNullPointerException(info->opt_flags);
1046  if (is_empty) {
1047    // dst = (dst == 0);
1048    if (cu_->instruction_set == kThumb2) {
1049      RegStorage t_reg = AllocTemp();
1050      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1051      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1052    } else if (cu_->instruction_set == kArm64) {
1053      OpRegImm(kOpSub, rl_result.reg, 1);
1054      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1055    } else {
1056      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1057      OpRegImm(kOpSub, rl_result.reg, 1);
1058      OpRegImm(kOpLsr, rl_result.reg, 31);
1059    }
1060  }
1061  StoreValue(rl_dest, rl_result);
1062  return true;
1063}
1064
1065bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1066  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1067    // TODO: add Mips and Mips64 implementations.
1068    return false;
1069  }
1070  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1071  if (rl_dest.s_reg_low == INVALID_SREG) {
1072    // Result is unused, the code is dead. Inlining successful, no code generated.
1073    return true;
1074  }
1075  RegLocation rl_src_i = info->args[0];
1076  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1077  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1078  if (IsWide(size)) {
1079    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1080      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1081      StoreValueWide(rl_dest, rl_result);
1082      return true;
1083    }
1084    RegStorage r_i_low = rl_i.reg.GetLow();
1085    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1086      // The first REV will clobber rl_result.reg.GetReg(), so save the value in a temp for the second REV.
1087      r_i_low = AllocTemp();
1088      OpRegCopy(r_i_low, rl_i.reg);
1089    }
1090    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1091    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1092    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1093      FreeTemp(r_i_low);
1094    }
1095    StoreValueWide(rl_dest, rl_result);
1096  } else {
1097    DCHECK(size == k32 || size == kSignedHalf);
1098    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1099    OpRegReg(op, rl_result.reg, rl_i.reg);
1100    StoreValue(rl_dest, rl_result);
1101  }
1102  return true;
1103}
1104
1105bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1106  RegLocation rl_dest = InlineTarget(info);
1107  if (rl_dest.s_reg_low == INVALID_SREG) {
1108    // Result is unused, the code is dead. Inlining successful, no code generated.
1109    return true;
1110  }
1111  RegLocation rl_src = info->args[0];
1112  rl_src = LoadValue(rl_src, kCoreReg);
1113  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1114  RegStorage sign_reg = AllocTemp();
1115  // abs(x): y = x >> 31 (arithmetic shift), abs = (x + y) ^ y.
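  // Worked example: x = -5 -> y = x >> 31 = -1 (all ones), (x + y) ^ y = (-6) ^ (-1) = 5;
  // for x >= 0, y = 0 and the expression is just x.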
1116  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1117  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1118  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1119  StoreValue(rl_dest, rl_result);
1120  return true;
1121}
1122
1123bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1124  RegLocation rl_dest = InlineTargetWide(info);
1125  if (rl_dest.s_reg_low == INVALID_SREG) {
1126    // Result is unused, the code is dead. Inlining successful, no code generated.
1127    return true;
1128  }
1129  RegLocation rl_src = info->args[0];
1130  rl_src = LoadValueWide(rl_src, kCoreReg);
1131  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1132
1133  // If on x86 or if we would clobber a register needed later, just copy the source first.
1134  if (cu_->instruction_set != kX86_64 &&
1135      (cu_->instruction_set == kX86 ||
1136       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1137    OpRegCopyWide(rl_result.reg, rl_src.reg);
1138    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1139        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1140        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1141        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1142      // Reuse source registers to avoid running out of temps.
1143      FreeTemp(rl_src.reg);
1144    }
1145    rl_src = rl_result;
1146  }
1147
1148  // abs(x): y = x >> 31 (arithmetic shift), abs = (x + y) ^ y.
1149  RegStorage sign_reg;
1150  if (cu_->instruction_set == kX86_64) {
1151    sign_reg = AllocTempWide();
1152    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1153    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1154    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1155  } else {
1156    sign_reg = AllocTemp();
1157    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1158    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1159    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1160    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1161    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1162  }
1163  FreeTemp(sign_reg);
1164  StoreValueWide(rl_dest, rl_result);
1165  return true;
1166}
1167
1168bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1169  // Currently implemented only for ARM64.
1170  UNUSED(info, size);
1171  return false;
1172}
1173
1174bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1175  // Currently implemented only for ARM64.
1176  UNUSED(info, is_min, is_double);
1177  return false;
1178}
1179
1180bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
1181  UNUSED(info);
1182  return false;
1183}
1184
1185bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
1186  UNUSED(info);
1187  return false;
1188}
1189
1190bool Mir2Lir::GenInlinedRint(CallInfo* info) {
1191  UNUSED(info);
1192  return false;
1193}
1194
1195bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
1196  UNUSED(info, is_double);
1197  return false;
1198}
1199
1200bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1201  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1202    // TODO: add Mips and Mips64 implementations.
1203    return false;
1204  }
1205  RegLocation rl_dest = InlineTarget(info);
1206  if (rl_dest.s_reg_low == INVALID_SREG) {
1207    // Result is unused, the code is dead. Inlining successful, no code generated.
1208    return true;
1209  }
1210  RegLocation rl_src = info->args[0];
1211  StoreValue(rl_dest, rl_src);
1212  return true;
1213}
1214
1215bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1216  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1217    // TODO: add Mips and Mips64 implementations.
1218    return false;
1219  }
1220  RegLocation rl_dest = InlineTargetWide(info);
1221  if (rl_dest.s_reg_low == INVALID_SREG) {
1222    // Result is unused, the code is dead. Inlining successful, no code generated.
1223    return true;
1224  }
1225  RegLocation rl_src = info->args[0];
1226  StoreValueWide(rl_dest, rl_src);
1227  return true;
1228}
1229
1230bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1231  UNUSED(info);
1232  return false;
1233}
1234
1235
1236/*
1237 * Fast String.indexOf(I) & (II).  Tests for the simple case of char <= 0xFFFF;
1238 * otherwise bails to the standard library code.
1239 */
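// Chars above 0xFFFF are supplementary code points that need surrogate-pair handling, which is
// why those cases are left to the library implementation of String.indexOf().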
1240bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1241  RegLocation rl_obj = info->args[0];
1242  RegLocation rl_char = info->args[1];
1243  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1244    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1245    return false;
1246  }
1247
1248  ClobberCallerSave();
1249  LockCallTemps();  // Using fixed registers
1250  RegStorage reg_ptr = TargetReg(kArg0, kRef);
1251  RegStorage reg_char = TargetReg(kArg1, kNotWide);
1252  RegStorage reg_start = TargetReg(kArg2, kNotWide);
1253
1254  LoadValueDirectFixed(rl_obj, reg_ptr);
1255  LoadValueDirectFixed(rl_char, reg_char);
1256  if (zero_based) {
1257    LoadConstant(reg_start, 0);
1258  } else {
1259    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1260    LoadValueDirectFixed(rl_start, reg_start);
1261  }
1262  RegStorage r_tgt = LoadHelper(kQuickIndexOf);
1263  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1264  LIR* high_code_point_branch =
1265      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1266  // NOTE: not a safepoint
1267  OpReg(kOpBlx, r_tgt);
1268  if (!rl_char.is_const) {
1269    // Add the slow path for code points beyond 0xFFFF.
1270    DCHECK(high_code_point_branch != nullptr);
1271    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1272    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1273    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1274    ClobberCallerSave();  // We must clobber everything because slow path will return here
1275  } else {
1276    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1277    DCHECK(high_code_point_branch == nullptr);
1278  }
1279  RegLocation rl_return = GetReturn(kCoreReg);
1280  RegLocation rl_dest = InlineTarget(info);
1281  StoreValue(rl_dest, rl_return);
1282  return true;
1283}
1284
1285/* Fast String.compareTo(Ljava/lang/String;)I. */
1286bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1287  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1288    // TODO: add Mips and Mips64 implementations.
1289    return false;
1290  }
1291  ClobberCallerSave();
1292  LockCallTemps();  // Using fixed registers
1293  RegStorage reg_this = TargetReg(kArg0, kRef);
1294  RegStorage reg_cmp = TargetReg(kArg1, kRef);
1295
1296  RegLocation rl_this = info->args[0];
1297  RegLocation rl_cmp = info->args[1];
1298  LoadValueDirectFixed(rl_this, reg_this);
1299  LoadValueDirectFixed(rl_cmp, reg_cmp);
1300  RegStorage r_tgt;
1301  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1302    r_tgt = LoadHelper(kQuickStringCompareTo);
1303  } else {
1304    r_tgt = RegStorage::InvalidReg();
1305  }
1306  GenExplicitNullCheck(reg_this, info->opt_flags);
1307  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1308  // TUNING: check if rl_cmp.s_reg_low is already null checked
1309  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1310  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1311  // NOTE: not a safepoint
1312  CallHelper(r_tgt, kQuickStringCompareTo, false, true);
1313  RegLocation rl_return = GetReturn(kCoreReg);
1314  RegLocation rl_dest = InlineTarget(info);
1315  StoreValue(rl_dest, rl_return);
1316  return true;
1317}
1318
1319bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1320  RegLocation rl_dest = InlineTarget(info);
1321
1322  // Early exit if the result is unused.
1323  if (rl_dest.orig_sreg < 0) {
1324    return true;
1325  }
1326
1327  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1328
1329  if (Is64BitInstructionSet(cu_->instruction_set)) {
1330    LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1331                kNotVolatile);
1332  } else {
1333    Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1334  }
1335
1336  StoreValue(rl_dest, rl_result);
1337  return true;
1338}
1339
1340bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1341                                  bool is_long, bool is_volatile) {
1342  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1343    // TODO: add Mips and Mips64 implementations.
1344    return false;
1345  }
1346  // Unused - RegLocation rl_src_unsafe = info->args[0];
1347  RegLocation rl_src_obj = info->args[1];  // Object
1348  RegLocation rl_src_offset = info->args[2];  // long low
1349  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1350  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1351
1352  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1353  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1354  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
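  // Wide gets can use a single reg+reg indexed 64-bit load on x86, x86-64 and arm64; the
  // remaining targets compute object + offset into a temp and load through it with a zero
  // displacement.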
1355  if (is_long) {
1356    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1357        || cu_->instruction_set == kArm64) {
1358      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1359    } else {
1360      RegStorage rl_temp_offset = AllocTemp();
1361      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1362      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1363      FreeTemp(rl_temp_offset);
1364    }
1365  } else {
1366    if (rl_result.ref) {
1367      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1368    } else {
1369      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1370    }
1371  }
1372
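  // A volatile get needs acquire semantics: the load/any barrier keeps later memory accesses
  // from being reordered above the load just emitted.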
1373  if (is_volatile) {
1374    GenMemBarrier(kLoadAny);
1375  }
1376
1377  if (is_long) {
1378    StoreValueWide(rl_dest, rl_result);
1379  } else {
1380    StoreValue(rl_dest, rl_result);
1381  }
1382  return true;
1383}
1384
1385bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1386                                  bool is_object, bool is_volatile, bool is_ordered) {
1387  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1388    // TODO: add Mips and Mips64 implementations.
1389    return false;
1390  }
1391  // Unused - RegLocation rl_src_unsafe = info->args[0];
1392  RegLocation rl_src_obj = info->args[1];  // Object
1393  RegLocation rl_src_offset = info->args[2];  // long low
1394  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1395  RegLocation rl_src_value = info->args[4];  // value to store
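  // Both volatile and ordered puts require release semantics, so an any/store barrier is
  // emitted before the store; volatile puts additionally get an any/any barrier afterwards.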
1396  if (is_volatile || is_ordered) {
1397    GenMemBarrier(kAnyStore);
1398  }
1399  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1400  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1401  RegLocation rl_value;
1402  if (is_long) {
1403    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1404    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1405        || cu_->instruction_set == kArm64) {
1406      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1407    } else {
1408      RegStorage rl_temp_offset = AllocTemp();
1409      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1410      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1411      FreeTemp(rl_temp_offset);
1412    }
1413  } else {
1414    rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
1415    if (rl_value.ref) {
1416      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1417    } else {
1418      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1419    }
1420  }
1421
1422  // Free up the temp early to ensure x86 doesn't run out of temporaries in MarkGCCard.
1423  FreeTemp(rl_offset.reg);
1424
1425  if (is_volatile) {
1426    // Prevent reordering with a subsequent volatile load.
1427    // May also be needed to address store atomicity issues.
1428    GenMemBarrier(kAnyAny);
1429  }
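  // A reference store must dirty the card covering the holding object so that the GC can find
  // the updated reference.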
1430  if (is_object) {
1431    MarkGCCard(0, rl_value.reg, rl_object.reg);
1432  }
1433  return true;
1434}
1435
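// Entry point for invokes: recognized intrinsics are expanded inline via the method inliner;
// anything it declines (and every non-intrinsic call) falls through to the generic invoke
// sequence below.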
1436void Mir2Lir::GenInvoke(CallInfo* info) {
1437  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1438  if (mir_graph_->GetMethodLoweringInfo(info->mir).IsIntrinsic()) {
1439    const DexFile* dex_file = info->method_ref.dex_file;
1440    auto* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file);
1441    if (inliner->GenIntrinsic(this, info)) {
1442      return;
1443    }
1444  }
1445  GenInvokeNoInline(info);
1446}
1447
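// Generic invoke: flush all values to their home locations, pick a call-sequence generator for
// the (sharpened) invoke type, interleave it with the argument moves, then emit the call itself.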
1448void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1449  int call_state = 0;
1450  LIR* null_ck;
1451  LIR** p_null_ck = nullptr;
1452  NextCallInsn next_call_insn;
1453  FlushAllRegs();  /* Everything to home location */
1454  // Explicit register usage
1455  LockCallTemps();
1456
1457  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1458  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1459  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1460  info->type = method_info.GetSharpType();
1461  bool fast_path = method_info.FastPath();
1462  bool skip_this;
1463
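  // Select the call-sequence generator for this invoke type.  Fast-path variants assume a
  // resolved, sharpened target; the others go through checking variants.  skip_this is set when
  // the chosen sequence materializes the receiver itself (presumably because it is needed to
  // locate the target), so the generic argument loading below must not load it a second time.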
1464  if (info->type == kInterface) {
1465    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1466    skip_this = fast_path;
1467  } else if (info->type == kDirect) {
1468    if (fast_path) {
1469      p_null_ck = &null_ck;
1470    }
1471    next_call_insn = fast_path ? GetNextSDCallInsn() : NextDirectCallInsnSP;
1472    skip_this = false;
1473  } else if (info->type == kStatic) {
1474    next_call_insn = fast_path ? GetNextSDCallInsn() : NextStaticCallInsnSP;
1475    skip_this = false;
1476  } else if (info->type == kSuper) {
1477    DCHECK(!fast_path);  // Fast path is a direct call.
1478    next_call_insn = NextSuperCallInsnSP;
1479    skip_this = false;
1480  } else {
1481    DCHECK_EQ(info->type, kVirtual);
1482    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1483    skip_this = fast_path;
1484  }
1485  MethodReference target_method = method_info.GetTargetMethod();
1486  call_state = GenDalvikArgs(info, call_state, p_null_ck,
1487                             next_call_insn, target_method, method_info.VTableIndex(),
1488                             method_info.DirectCode(), method_info.DirectMethod(),
1489                             original_type, skip_this);
1490  // Finish up any part of the call sequence that was not interleaved with the argument loads.
1491  while (call_state >= 0) {
1492    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1493                                method_info.DirectCode(), method_info.DirectMethod(),
1494                                original_type);
1495  }
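  // The call instruction itself is the safepoint: record its PC so the runtime can map the
  // native return address back to this dex instruction.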
1496  LIR* call_insn = GenCallInsn(method_info);
1497  MarkSafepointPC(call_insn);
1498
1499  FreeCallTemps();
1500  if (info->result.location != kLocInvalid) {
1501    // We have a following MOVE_RESULT - do it now.
1502    if (info->result.wide) {
1503      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1504      StoreValueWide(info->result, ret_loc);
1505    } else {
1506      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1507      StoreValue(info->result, ret_loc);
1508    }
1509  }
1510}
1511
1512}  // namespace art
1513