gen_invoke.cc revision d85614222fa062ec809af9d65f04ab6b7dc1c248
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/class-inl.h"
26#include "mirror/dex_cache.h"
27#include "mirror/object_array-inl.h"
28#include "mirror/reference-inl.h"
29#include "mirror/string.h"
30#include "mir_to_lir-inl.h"
31#include "scoped_thread_state_change.h"
32#include "x86/codegen_x86.h"
33
34namespace art {
35
36// Shortcuts to repeatedly used long types.
37typedef mirror::ObjectArray<mirror::Object> ObjArray;
38
39/*
40 * This source file contains "gen" codegen routines that should
41 * be applicable to most targets.  Only mid-level support utilities
42 * and "op" calls may be used here.
43 */
44
45void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
46  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
47   public:
48    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
49        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
50    }
51
52    void Compile() {
53      m2l_->ResetRegPool();
54      m2l_->ResetDefTracking();
55      GenerateTargetLabel(kPseudoIntrinsicRetry);
56      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
57      m2l_->GenInvokeNoInline(info_);
58      if (cont_ != nullptr) {
59        m2l_->OpUnconditionalBranch(cont_);
60      }
61    }
62
63   private:
64    CallInfo* const info_;
65  };
66
67  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
68}
69
70// Macro to instantiate the runtime helper templates for both ThreadOffset<4> and ThreadOffset<8>.
71// TODO: This might be used to only instantiate <4> on pure 32b systems.
72#define INSTANTIATE(sig_part1, ...) \
73  template sig_part1(ThreadOffset<4>, __VA_ARGS__); \
74  template sig_part1(ThreadOffset<8>, __VA_ARGS__); \
75
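// For example, INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc) expands to explicit
// instantiations of CallRuntimeHelper for both ThreadOffset<4> and ThreadOffset<8>.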
76
77/*
78 * To save scheduling time, helper calls are broken into two parts: generation of
79 * the helper target address, and the actual call to the helper.  Because x86
80 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
81 * load arguments between the two parts.
82 */
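// The CallRuntimeHelper* wrappers below all follow the same two-part pattern:
//   RegStorage r_tgt = CallHelperSetup(helper_offset);  // Part 1: load target (no-op on x86).
//   ... load arguments into TargetReg(kArg0, ...) etc. ...
//   ClobberCallerSave();
//   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);  // Part 2: emit the call.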
83// template <size_t pointer_size>
84RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
85  // All CallRuntimeHelperXXX call this first. So make a central check here.
86  DCHECK_EQ(4U, GetInstructionSetPointerSize(cu_->instruction_set));
87
88  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
89    return RegStorage::InvalidReg();
90  } else {
91    return LoadHelper(helper_offset);
92  }
93}
94
95RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<8> helper_offset) {
96  // All CallRuntimeHelperXXX call this first. So make a central check here.
97  DCHECK_EQ(8U, GetInstructionSetPointerSize(cu_->instruction_set));
98
99  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
100    return RegStorage::InvalidReg();
101  } else {
102    return LoadHelper(helper_offset);
103  }
104}
105
106/* NOTE: if r_tgt is a temp, it will be freed following use */
107template <size_t pointer_size>
108LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset,
109                         bool safepoint_pc, bool use_link) {
110  LIR* call_inst;
111  OpKind op = use_link ? kOpBlx : kOpBx;
112  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
113    call_inst = OpThreadMem(op, helper_offset);
114  } else {
115    call_inst = OpReg(op, r_tgt);
116    FreeTemp(r_tgt);
117  }
118  if (safepoint_pc) {
119    MarkSafepointPC(call_inst);
120  }
121  return call_inst;
122}
123template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset,
124                                        bool safepoint_pc, bool use_link);
125template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<8> helper_offset,
126                                        bool safepoint_pc, bool use_link);
127
128template <size_t pointer_size>
129void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc) {
130  RegStorage r_tgt = CallHelperSetup(helper_offset);
131  ClobberCallerSave();
132  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
133}
134INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
135
136template <size_t pointer_size>
137void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
138                                   bool safepoint_pc) {
139  RegStorage r_tgt = CallHelperSetup(helper_offset);
140  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
141  ClobberCallerSave();
142  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
143}
144INSTANTIATE(void Mir2Lir::CallRuntimeHelperImm, int arg0, bool safepoint_pc)
145
146template <size_t pointer_size>
147void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
148                                   bool safepoint_pc) {
149  RegStorage r_tgt = CallHelperSetup(helper_offset);
150  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
151  ClobberCallerSave();
152  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
153}
154INSTANTIATE(void Mir2Lir::CallRuntimeHelperReg, RegStorage arg0, bool safepoint_pc)
155
156template <size_t pointer_size>
157void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset,
158                                           RegLocation arg0, bool safepoint_pc) {
159  RegStorage r_tgt = CallHelperSetup(helper_offset);
160  if (arg0.wide == 0) {
161    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
162  } else {
163    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
164  }
165  ClobberCallerSave();
166  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
167}
168INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocation, RegLocation arg0, bool safepoint_pc)
169
170template <size_t pointer_size>
171void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
172                                      bool safepoint_pc) {
173  RegStorage r_tgt = CallHelperSetup(helper_offset);
174  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
175  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
176  ClobberCallerSave();
177  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
178}
179INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmImm, int arg0, int arg1, bool safepoint_pc)
180
181template <size_t pointer_size>
182void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
183                                              RegLocation arg1, bool safepoint_pc) {
184  RegStorage r_tgt = CallHelperSetup(helper_offset);
185  DCHECK(!arg1.fp);
186  if (arg1.wide == 0) {
187    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
188  } else {
189    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
190    LoadValueDirectWideFixed(arg1, r_tmp);
191  }
192  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
193  ClobberCallerSave();
194  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
195}
196INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocation, int arg0, RegLocation arg1,
197            bool safepoint_pc)
198
199template <size_t pointer_size>
200void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
201                                              RegLocation arg0, int arg1, bool safepoint_pc) {
202  RegStorage r_tgt = CallHelperSetup(helper_offset);
203  DCHECK(!arg0.wide);
204  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
205  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
206  ClobberCallerSave();
207  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
208}
209INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationImm, RegLocation arg0, int arg1,
210            bool safepoint_pc)
211
212template <size_t pointer_size>
213void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
214                                      RegStorage arg1, bool safepoint_pc) {
215  RegStorage r_tgt = CallHelperSetup(helper_offset);
216  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
217  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
218  ClobberCallerSave();
219  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
220}
221INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmReg, int arg0, RegStorage arg1, bool safepoint_pc)
222
223template <size_t pointer_size>
224void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
225                                      int arg1, bool safepoint_pc) {
226  RegStorage r_tgt = CallHelperSetup(helper_offset);
227  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
228  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
229  ClobberCallerSave();
230  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
231}
232INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegImm, RegStorage arg0, int arg1, bool safepoint_pc)
233
234template <size_t pointer_size>
235void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
236                                         bool safepoint_pc) {
237  RegStorage r_tgt = CallHelperSetup(helper_offset);
238  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
239  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
240  ClobberCallerSave();
241  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
242}
243INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethod, int arg0, bool safepoint_pc)
244
245template <size_t pointer_size>
246void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
247                                         bool safepoint_pc) {
248  RegStorage r_tgt = CallHelperSetup(helper_offset);
249  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
250  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
251  if (r_tmp.NotExactlyEquals(arg0)) {
252    OpRegCopy(r_tmp, arg0);
253  }
254  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
255  ClobberCallerSave();
256  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
257}
258INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethod, RegStorage arg0, bool safepoint_pc)
259
260template <size_t pointer_size>
261void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
262                                                    RegStorage arg0, RegLocation arg2,
263                                                    bool safepoint_pc) {
264  RegStorage r_tgt = CallHelperSetup(helper_offset);
265  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
266  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
267  if (r_tmp.NotExactlyEquals(arg0)) {
268    OpRegCopy(r_tmp, arg0);
269  }
270  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
271  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
272  ClobberCallerSave();
273  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
274}
275INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethodRegLocation, RegStorage arg0, RegLocation arg2,
276            bool safepoint_pc)
277
278template <size_t pointer_size>
279void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
280                                                      RegLocation arg0, RegLocation arg1,
281                                                      bool safepoint_pc) {
282  RegStorage r_tgt = CallHelperSetup(helper_offset);
283  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
284    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
285
286    RegStorage arg1_reg;
287    if (arg1.fp == arg0.fp) {
288      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
289    } else {
290      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
291    }
292
293    if (arg0.wide == 0) {
294      LoadValueDirectFixed(arg0, arg0_reg);
295    } else {
296      LoadValueDirectWideFixed(arg0, arg0_reg);
297    }
298
299    if (arg1.wide == 0) {
300      LoadValueDirectFixed(arg1, arg1_reg);
301    } else {
302      LoadValueDirectWideFixed(arg1, arg1_reg);
303    }
304  } else {
305    DCHECK(!cu_->target64);
306    if (arg0.wide == 0) {
307      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
308      if (arg1.wide == 0) {
309        if (cu_->instruction_set == kMips) {
310          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
311        } else {
312          LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide));
313        }
314      } else {
315        if (cu_->instruction_set == kMips) {
316          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
317        } else {
318          LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide));
319        }
320      }
321    } else {
322      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
323      if (arg1.wide == 0) {
324        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
325      } else {
326        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
327      }
328    }
329  }
330  ClobberCallerSave();
331  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
332}
333INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
334            RegLocation arg1, bool safepoint_pc)
335
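// Copy arg0/arg1 into kArg0/kArg1, using kArg2 as a temp when the incoming registers already
// overlap the destination argument registers.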
336void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
337  WideKind arg0_kind = arg0.GetWideKind();
338  WideKind arg1_kind = arg1.GetWideKind();
339  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
340    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
341      // Swap kArg0 and kArg1 with kArg2 as temp.
342      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
343      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
344      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
345    } else {
346      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
347      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
348    }
349  } else {
350    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
351    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
352  }
353}
354
355template <size_t pointer_size>
356void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
357                                      RegStorage arg1, bool safepoint_pc) {
358  RegStorage r_tgt = CallHelperSetup(helper_offset);
359  CopyToArgumentRegs(arg0, arg1);
360  ClobberCallerSave();
361  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
362}
363INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegReg, RegStorage arg0, RegStorage arg1,
364            bool safepoint_pc)
365
366template <size_t pointer_size>
367void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
368                                         RegStorage arg1, int arg2, bool safepoint_pc) {
369  RegStorage r_tgt = CallHelperSetup(helper_offset);
370  CopyToArgumentRegs(arg0, arg1);
371  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
372  ClobberCallerSave();
373  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
374}
375INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegRegImm, RegStorage arg0, RegStorage arg1, int arg2,
376            bool safepoint_pc)
377
378template <size_t pointer_size>
379void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
380                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
381  RegStorage r_tgt = CallHelperSetup(helper_offset);
382  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
383  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
384  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
385  ClobberCallerSave();
386  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
387}
388INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodRegLocation, int arg0, RegLocation arg2,
389            bool safepoint_pc)
390
391template <size_t pointer_size>
392void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
393                                            int arg2, bool safepoint_pc) {
394  RegStorage r_tgt = CallHelperSetup(helper_offset);
395  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
396  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
397  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
398  ClobberCallerSave();
399  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
400}
401INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodImm, int arg0, int arg2, bool safepoint_pc)
402
403template <size_t pointer_size>
404void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
405                                                         int arg0, RegLocation arg1,
406                                                         RegLocation arg2, bool safepoint_pc) {
407  RegStorage r_tgt = CallHelperSetup(helper_offset);
408  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
409                                                        // instantiation bug in GCC.
410  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
411  if (arg2.wide == 0) {
412    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
413  } else {
414    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
415  }
416  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
417  ClobberCallerSave();
418  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
419}
420INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg1,
421            RegLocation arg2, bool safepoint_pc)
422
423template <size_t pointer_size>
424void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
425    ThreadOffset<pointer_size> helper_offset,
426    RegLocation arg0,
427    RegLocation arg1,
428    RegLocation arg2,
429    bool safepoint_pc) {
430  RegStorage r_tgt = CallHelperSetup(helper_offset);
431  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
432  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
433  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
434  ClobberCallerSave();
435  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
436}
437INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation, RegLocation arg0,
438            RegLocation arg1, RegLocation arg2, bool safepoint_pc)
439
440/*
441 * If there are any ins passed in registers that have not been promoted
442 * to a callee-save register, flush them to the frame.  Perform initial
443 * assignment of promoted arguments.
444 *
445 * ArgLocs is an array of location records describing the incoming arguments
446 * with one location record per word of argument.
447 */
448void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
449  /*
450   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
451   * It will attempt to keep kArg0 live (or copy it to home location
452   * if promoted).
453   */
454  RegLocation rl_src = rl_method;
455  rl_src.location = kLocPhysReg;
456  rl_src.reg = TargetReg(kArg0, kRef);
457  rl_src.home = false;
458  MarkLive(rl_src);
459  StoreValue(rl_method, rl_src);
460  // If Method* has been promoted, explicitly flush
461  if (rl_method.location == kLocPhysReg) {
462    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
463  }
464
465  if (cu_->num_ins == 0) {
466    return;
467  }
468
469  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
470  /*
471   * Copy incoming arguments to their proper home locations.
472   * NOTE: an older version of dx had an issue in which
473   * it would reuse static method argument registers.
474   * This could result in the same Dalvik virtual register
475   * being promoted to both core and fp regs. To account for this,
476   * we only copy to the corresponding promoted physical register
477   * if it matches the type of the SSA name for the incoming
478   * argument.  It is also possible that long and double arguments
479   * end up half-promoted.  In those cases, we must flush the promoted
480   * half to memory as well.
481   */
482  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
483  for (int i = 0; i < cu_->num_ins; i++) {
484    PromotionMap* v_map = &promotion_map_[start_vreg + i];
485    RegStorage reg = GetArgMappingToPhysicalReg(i);
486
487    if (reg.Valid()) {
488      // If arriving in register
489      bool need_flush = true;
490      RegLocation* t_loc = &ArgLocs[i];
491      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
492        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
493        need_flush = false;
494      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
495        OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
496        need_flush = false;
497      } else {
498        need_flush = true;
499      }
500
501      // For wide args, force flush if not fully promoted
502      if (t_loc->wide) {
503        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
504        // Is only half promoted?
505        need_flush |= (p_map->core_location != v_map->core_location) ||
506            (p_map->fp_location != v_map->fp_location);
507        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
508          /*
509           * In Arm, a double is represented as a pair of consecutive single float
510           * registers starting at an even number.  It's possible that both Dalvik vRegs
511           * representing the incoming double were independently promoted as singles - but
512           * not in a form usable as a double.  If so, we need to flush - even though the
513           * incoming arg appears fully in register.  At this point in the code, both
514           * halves of the double are promoted.  Make sure they are in a usable form.
515           */
516          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
517          int low_reg = promotion_map_[lowreg_index].fp_reg;
518          int high_reg = promotion_map_[lowreg_index + 1].fp_reg;
519          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
520            need_flush = true;
521          }
522        }
523      }
524      if (need_flush) {
525        Store32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg);
526      }
527    } else {
528      // If arriving in frame & promoted
529      if (v_map->core_location == kLocPhysReg) {
530        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
531                   RegStorage::Solo32(v_map->core_reg));
532      }
533      if (v_map->fp_location == kLocPhysReg) {
534        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
535                   RegStorage::Solo32(v_map->fp_reg));
536      }
537    }
538  }
539}
540
541static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
542  RegLocation rl_arg = info->args[0];
543  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
544}
545
546static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
547  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
548  // get this->klass_ [use kArg1, set kArg0]
549  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
550                  cg->TargetReg(kArg0, kRef),
551                  kNotVolatile);
552  cg->MarkPossibleNullPointerException(info->opt_flags);
553}
554
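// Loads the compiled-code entry point into kInvokeTgt and returns true. On x86/x86-64 the call
// goes through memory instead, so nothing is loaded and false is returned.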
555static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
556                                                       const RegStorage* alt_from,
557                                                       const CompilationUnit* cu, Mir2Lir* cg) {
558  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
559    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
560    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
561                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
562                     cg->TargetPtrReg(kInvokeTgt));
563    return true;
564  }
565  return false;
566}
567
568/*
569 * Bit of a hack here - in the absence of a real scheduling pass,
570 * emit the next instruction in static & direct invoke sequences.
571 */
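// The NextXXXCallInsn helpers below emit one step of the invoke setup per call; GenDalvikArgs*
// interleaves these steps with argument moves, and a return value of -1 means the sequence is done.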
572static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
573                          int state, const MethodReference& target_method,
574                          uint32_t unused,
575                          uintptr_t direct_code, uintptr_t direct_method,
576                          InvokeType type) {
577  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
578  if (direct_code != 0 && direct_method != 0) {
579    switch (state) {
580    case 0:  // Get the current Method* [sets kArg0]
581      if (direct_code != static_cast<uintptr_t>(-1)) {
582        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
583          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
584        }
585      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
586        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
587      }
588      if (direct_method != static_cast<uintptr_t>(-1)) {
589        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
590      } else {
591        cg->LoadMethodAddress(target_method, type, kArg0);
592      }
593      break;
594    default:
595      return -1;
596    }
597  } else {
598    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
599    switch (state) {
600    case 0:  // Get the current Method* [sets kArg0]
601      // TUNING: we can save a reg copy if Method* has been promoted.
602      cg->LoadCurrMethodDirect(arg0_ref);
603      break;
604    case 1:  // Get method->dex_cache_resolved_methods_
605      cg->LoadRefDisp(arg0_ref,
606                      mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
607                      arg0_ref,
608                      kNotVolatile);
609      // Set up direct code if known.
610      if (direct_code != 0) {
611        if (direct_code != static_cast<uintptr_t>(-1)) {
612          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
613        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
614          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
615          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
616        }
617      }
618      break;
619    case 2:  // Grab target method*
620      CHECK_EQ(cu->dex_file, target_method.dex_file);
621      cg->LoadRefDisp(arg0_ref,
622                      ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
623                      arg0_ref,
624                      kNotVolatile);
625      break;
626    case 3:  // Grab the code from the method*
627      if (direct_code == 0) {
628        if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
629          break;                                    // kInvokeTgt := arg0_ref->entrypoint
630        }
631      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
632        break;
633      }
634      // Intentional fallthrough for x86
635    default:
636      return -1;
637    }
638  }
639  return state + 1;
640}
641
642/*
643 * Bit of a hack here - in the absence of a real scheduling pass,
644 * emit the next instruction in a virtual invoke sequence.
645 * We can use kLr as a temp prior to target address loading
646 * Note also that we'll load the first argument ("this") into
647 * kArg1 here rather than the standard LoadArgRegs.
648 */
649static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
650                         int state, const MethodReference& target_method,
651                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
652                         InvokeType unused3) {
653  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
654  /*
655   * This is the fast path in which the target virtual method is
656   * fully resolved at compile time.
657   */
658  switch (state) {
659    case 0:
660      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
661      break;
662    case 1:
663      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
664                                                  // Includes a null-check.
665      break;
666    case 2: {
667      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
668      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
669          method_idx * sizeof(mirror::Class::VTableEntry);
670      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
671      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
672      break;
673    }
674    case 3:
675      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
676        break;                                    // kInvokeTgt := kArg0->entrypoint
677      }
678      // Intentional fallthrough for X86
679    default:
680      return -1;
681  }
682  return state + 1;
683}
684
685/*
686 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
687 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
688 * more than one interface method map to the same index. Note also that we'll load the first
689 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
690 */
691static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
692                                 const MethodReference& target_method,
693                                 uint32_t method_idx, uintptr_t unused,
694                                 uintptr_t direct_method, InvokeType unused2) {
695  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
696
697  switch (state) {
698    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
699      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
700      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
701      if (cu->instruction_set == kX86) {
702        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
703      }
704      break;
705    case 1:
706      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
707      break;
708    case 2:
709      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
710                                                  // Includes a null-check.
711      break;
712    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
713      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
714          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
715      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
716      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
717      break;
718    }
719    case 4:
720      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
721        break;                                    // kInvokeTgt := kArg0->entrypoint
722      }
723      // Intentional fallthrough for X86
724    default:
725      return -1;
726  }
727  return state + 1;
728}
729
730template <size_t pointer_size>
731static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
732                            ThreadOffset<pointer_size> trampoline, int state,
733                            const MethodReference& target_method, uint32_t method_idx) {
734  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
735  /*
736   * This handles the case in which the base method is not fully
737   * resolved at compile time; we bail to a runtime helper.
738   */
739  if (state == 0) {
740    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
741      // Load trampoline target
742      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(),
743                       cg->TargetPtrReg(kInvokeTgt));
744    }
745    // Load kArg0 with method index
746    CHECK_EQ(cu->dex_file, target_method.dex_file);
747    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
748    return 1;
749  }
750  return -1;
751}
752
753static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
754                                int state,
755                                const MethodReference& target_method,
756                                uint32_t unused, uintptr_t unused2,
757                                uintptr_t unused3, InvokeType unused4) {
758  if (cu->target64) {
759    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
760    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
761  } else {
762    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
763    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
764  }
765}
766
767static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
768                                const MethodReference& target_method,
769                                uint32_t unused, uintptr_t unused2,
770                                uintptr_t unused3, InvokeType unused4) {
771  if (cu->target64) {
772    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
773    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
774  } else {
775    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
776    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
777  }
778}
779
780static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
781                               const MethodReference& target_method,
782                               uint32_t unused, uintptr_t unused2,
783                               uintptr_t unused3, InvokeType unused4) {
784  if (cu->target64) {
785    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
786    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
787  } else {
788    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
789    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
790  }
791}
792
793static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
794                           const MethodReference& target_method,
795                           uint32_t unused, uintptr_t unused2,
796                           uintptr_t unused3, InvokeType unused4) {
797  if (cu->target64) {
798    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
799        pInvokeVirtualTrampolineWithAccessCheck);
800    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
801  } else {
802    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
803        pInvokeVirtualTrampolineWithAccessCheck);
804    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
805  }
806}
807
808static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
809                                                CallInfo* info, int state,
810                                                const MethodReference& target_method,
811                                                uint32_t unused, uintptr_t unused2,
812                                                uintptr_t unused3, InvokeType unused4) {
813  if (cu->target64) {
814    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
815        pInvokeInterfaceTrampolineWithAccessCheck);
816    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
817  } else {
818    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
819        pInvokeInterfaceTrampolineWithAccessCheck);
820    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
821  }
822}
823
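// Load up to the first three argument words into kArg1..kArg3 (optionally skipping "this"),
// calling next_call_insn between loads so invoke-setup steps overlap the argument moves.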
824int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
825                         NextCallInsn next_call_insn,
826                         const MethodReference& target_method,
827                         uint32_t vtable_idx, uintptr_t direct_code,
828                         uintptr_t direct_method, InvokeType type, bool skip_this) {
829  int last_arg_reg = 3 - 1;
830  int arg_regs[3] = {TargetReg(kArg1, kNotWide).GetReg(), TargetReg(kArg2, kNotWide).GetReg(),
831                     TargetReg(kArg3, kNotWide).GetReg()};
832
833  int next_reg = 0;
834  int next_arg = 0;
835  if (skip_this) {
836    next_reg++;
837    next_arg++;
838  }
839  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
840    RegLocation rl_arg = info->args[next_arg++];
841    rl_arg = UpdateRawLoc(rl_arg);
842    if (rl_arg.wide && (next_reg <= last_arg_reg - 1)) {
843      RegStorage r_tmp(RegStorage::k64BitPair, arg_regs[next_reg], arg_regs[next_reg + 1]);
844      LoadValueDirectWideFixed(rl_arg, r_tmp);
845      next_reg++;
846      next_arg++;
847    } else {
848      if (rl_arg.wide) {
849        rl_arg = NarrowRegLoc(rl_arg);
850        rl_arg.is_const = false;
851      }
852      LoadValueDirectFixed(rl_arg, RegStorage::Solo32(arg_regs[next_reg]));
853    }
854    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
855                                direct_code, direct_method, type);
856  }
857  return call_state;
858}
859
860/*
861 * Load up to 5 arguments, the first three of which will be in
862 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
863 * and as part of the load sequence, it must be replaced with
864 * the target method pointer.  Note, this may also be called
865 * for "range" variants if the number of arguments is 5 or fewer.
866 */
867int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
868                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
869                                  const MethodReference& target_method,
870                                  uint32_t vtable_idx, uintptr_t direct_code,
871                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
872  RegLocation rl_arg;
873
874  /* If no arguments, just return */
875  if (info->num_arg_words == 0)
876    return call_state;
877
878  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
879                              direct_code, direct_method, type);
880
881  DCHECK_LE(info->num_arg_words, 5);
882  if (info->num_arg_words > 3) {
883    int32_t next_use = 3;
884    // Detect special case of wide arg spanning arg3/arg4
885    RegLocation rl_use0 = info->args[0];
886    RegLocation rl_use1 = info->args[1];
887    RegLocation rl_use2 = info->args[2];
888    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
889      RegStorage reg;
890      // Wide spans, we need the 2nd half of uses[2].
891      rl_arg = UpdateLocWide(rl_use2);
892      if (rl_arg.location == kLocPhysReg) {
893        if (rl_arg.reg.IsPair()) {
894          reg = rl_arg.reg.GetHigh();
895        } else {
896          RegisterInfo* reg_info = GetRegInfo(rl_arg.reg);
897          reg_info = reg_info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
898          if (reg_info == nullptr) {
899            // NOTE: For hard float convention we won't split arguments across reg/mem.
900            UNIMPLEMENTED(FATAL) << "Needs hard float api.";
901          }
902          reg = reg_info->GetReg();
903        }
904      } else {
905        // kArg2 & kArg3 can safely be used here
906        reg = TargetReg(kArg3, kNotWide);
907        {
908          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
909          Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
910        }
911        call_state = next_call_insn(cu_, info, call_state, target_method,
912                                    vtable_idx, direct_code, direct_method, type);
913      }
914      {
915        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
916        Store32Disp(TargetPtrReg(kSp), (next_use + 1) * 4, reg);
917      }
918      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
919                                  direct_code, direct_method, type);
920      next_use++;
921    }
922    // Loop through the rest
923    while (next_use < info->num_arg_words) {
924      RegStorage arg_reg;
925      rl_arg = info->args[next_use];
926      rl_arg = UpdateRawLoc(rl_arg);
927      if (rl_arg.location == kLocPhysReg) {
928        arg_reg = rl_arg.reg;
929      } else {
930        arg_reg = TargetReg(kArg2, rl_arg.wide ? kWide : kNotWide);
931        if (rl_arg.wide) {
932          LoadValueDirectWideFixed(rl_arg, arg_reg);
933        } else {
934          LoadValueDirectFixed(rl_arg, arg_reg);
935        }
936        call_state = next_call_insn(cu_, info, call_state, target_method,
937                                    vtable_idx, direct_code, direct_method, type);
938      }
939      int outs_offset = (next_use + 1) * 4;
940      {
941        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
942        if (rl_arg.wide) {
943          StoreBaseDisp(TargetPtrReg(kSp), outs_offset, arg_reg, k64, kNotVolatile);
944          next_use += 2;
945        } else {
946          Store32Disp(TargetPtrReg(kSp), outs_offset, arg_reg);
947          next_use++;
948        }
949      }
950      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
951                               direct_code, direct_method, type);
952    }
953  }
954
955  call_state = LoadArgRegs(info, call_state, next_call_insn,
956                           target_method, vtable_idx, direct_code, direct_method,
957                           type, skip_this);
958
959  if (pcrLabel) {
960    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
961      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
962    } else {
963      *pcrLabel = nullptr;
964      // In lieu of generating a check for kArg1 being null, we need to
965      // perform a load when doing implicit checks.
966      RegStorage tmp = AllocTemp();
967      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
968      MarkPossibleNullPointerException(info->opt_flags);
969      FreeTemp(tmp);
970    }
971  }
972  return call_state;
973}
974
975/*
976 * May have 0+ arguments (also used for jumbo).  Note that
977 * source virtual registers may be in physical registers, so may
978 * need to be flushed to home location before copying.  This
979 * applies to arg3 and above (see below).
980 *
981 * Two general strategies:
982 *    If < 20 arguments
983 *       Pass args 3-18 using vldm/vstm block copy
984 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
985 *    If 20+ arguments
986 *       Pass args arg19+ using memcpy block copy
987 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
988 *
989 */
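// Note: the comment above describes the ARM strategy; on x86/x86-64 the block copy below uses
// 32-bit and 128-bit moves, and all other cases fall back to the pMemcpy helper call.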
990int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
991                                LIR** pcrLabel, NextCallInsn next_call_insn,
992                                const MethodReference& target_method,
993                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
994                                InvokeType type, bool skip_this) {
995  // If we can treat it as non-range (Jumbo ops will use range form)
996  if (info->num_arg_words <= 5)
997    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
998                                next_call_insn, target_method, vtable_idx,
999                                direct_code, direct_method, type, skip_this);
1000  /*
1001   * First load the non-register arguments.  Both forms expect all
1002   * of the source arguments to be in their home frame location, so
1003   * scan the s_reg names and flush any that have been promoted to
1004   * frame backing storage.
1005   */
1006  // Scan the rest of the args - if in phys_reg flush to memory
1007  for (int next_arg = 0; next_arg < info->num_arg_words;) {
1008    RegLocation loc = info->args[next_arg];
1009    if (loc.wide) {
1010      loc = UpdateLocWide(loc);
1011      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
1012        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1013        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
1014      }
1015      next_arg += 2;
1016    } else {
1017      loc = UpdateLoc(loc);
1018      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
1019        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1020        Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
1021      }
1022      next_arg++;
1023    }
1024  }
1025
1026  // Logic below assumes that Method pointer is at offset zero from SP.
1027  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
1028
1029  // The first 3 arguments are passed via registers.
1030  // TODO: For 64-bit, instead of hardcoding 4 for the Method* size, we should use either
1031  // the size of uintptr_t or the size of an object reference, according to the model being used.
1032  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
1033  int start_offset = SRegOffset(info->args[3].s_reg_low);
1034  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
1035  DCHECK_GT(regs_left_to_pass_via_stack, 0);
1036
1037  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
1038    // Use vldm/vstm pair using kArg3 as a temp
1039    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1040                             direct_code, direct_method, type);
1041    OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), start_offset);
1042    LIR* ld = nullptr;
1043    {
1044      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1045      ld = OpVldm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
1046    }
1047    // TUNING: loosen barrier
1048    ld->u.m.def_mask = &kEncodeAll;
1049    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1050                             direct_code, direct_method, type);
1051    OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4));
1052    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1053                             direct_code, direct_method, type);
1054    LIR* st = nullptr;
1055    {
1056      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1057      st = OpVstm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
1058    }
1059    st->u.m.def_mask = &kEncodeAll;
1060    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1061                             direct_code, direct_method, type);
1062  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1063    int current_src_offset = start_offset;
1064    int current_dest_offset = outs_offset;
1065
1066    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
1067    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1068    while (regs_left_to_pass_via_stack > 0) {
1069      // This is based on the knowledge that the stack itself is 16-byte aligned.
1070      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
1071      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
1072      size_t bytes_to_move;
1073
1074      /*
1075       * The amount to move defaults to 32-bit. If there are exactly 4 registers left to move,
1076       * do a 128-bit move, because we won't get another chance to try to align. If more than
1077       * 4 registers are left, do a 128-bit move only if either src or dest is already aligned;
1078       * otherwise a smaller move now may let a later move be aligned.
1079       */
1080      if (regs_left_to_pass_via_stack == 4 ||
1081          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
1082        // Moving 128-bits via xmm register.
1083        bytes_to_move = sizeof(uint32_t) * 4;
1084
1085        // Allocate a free xmm temp. Since we are working through the calling sequence,
1086        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
1087        // there are no free registers.
1088        RegStorage temp = AllocTempDouble();
1089
1090        LIR* ld1 = nullptr;
1091        LIR* ld2 = nullptr;
1092        LIR* st1 = nullptr;
1093        LIR* st2 = nullptr;
1094
1095        /*
1096         * The logic is similar for both loads and stores. If we have 16-byte alignment,
1097         * do an aligned move. If we have 8-byte alignment, then do the move in two
1098         * parts. This approach prevents possible cache line splits. Finally, fall back
1099         * to doing an unaligned move. In most cases we likely won't split the cache
1100         * line but we cannot prove it and thus take a conservative approach.
1101         */
1102        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
1103        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
1104
1105        if (src_is_16b_aligned) {
1106          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP);
1107        } else if (src_is_8b_aligned) {
1108          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP);
1109          ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1),
1110                            kMovHi128FP);
1111        } else {
1112          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP);
1113        }
1114
1115        if (dest_is_16b_aligned) {
1116          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP);
1117        } else if (dest_is_8b_aligned) {
1118          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP);
1119          st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1),
1120                            temp, kMovHi128FP);
1121        } else {
1122          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP);
1123        }
1124
1125        // TODO If we could keep track of aliasing information for memory accesses that are wider
1126        // than 64-bit, we wouldn't need to set up a barrier.
1127        if (ld1 != nullptr) {
1128          if (ld2 != nullptr) {
1129            // For 64-bit load we can actually set up the aliasing information.
1130            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
1131            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
1132                                    true);
1133          } else {
1134            // Set barrier for 128-bit load.
1135            ld1->u.m.def_mask = &kEncodeAll;
1136          }
1137        }
1138        if (st1 != nullptr) {
1139          if (st2 != nullptr) {
1140            // For 64-bit store we can actually set up the aliasing information.
1141            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
1142            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
1143                                    true);
1144          } else {
1145            // Set barrier for 128-bit store.
1146            st1->u.m.def_mask = &kEncodeAll;
1147          }
1148        }
1149
1150        // Free the temporary used for the data movement.
1151        FreeTemp(temp);
1152      } else {
1153        // Moving 32-bits via general purpose register.
1154        bytes_to_move = sizeof(uint32_t);
1155
1156        // Instead of allocating a new temp, simply reuse one of the registers being used
1157        // for argument passing.
1158        RegStorage temp = TargetReg(kArg3, kNotWide);
1159
1160        // Now load the argument VR and store to the outs.
1161        Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
1162        Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
1163      }
1164
1165      current_src_offset += bytes_to_move;
1166      current_dest_offset += bytes_to_move;
1167      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
1168    }
1169  } else {
1170    // Generate memcpy
1171    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
1172    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
1173    if (cu_->target64) {
1174      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef),
1175                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
1176    } else {
1177      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef),
1178                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
1179    }
1180  }
1181
1182  call_state = LoadArgRegs(info, call_state, next_call_insn,
1183                           target_method, vtable_idx, direct_code, direct_method,
1184                           type, skip_this);
1185
1186  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1187                           direct_code, direct_method, type);
1188  if (pcrLabel) {
1189    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
1190      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
1191    } else {
1192      *pcrLabel = nullptr;
1193      // In lieu of generating a check for kArg1 being null, we need to
1194      // perform a load when doing implicit checks.
1195      RegStorage tmp = AllocTemp();
1196      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
1197      MarkPossibleNullPointerException(info->opt_flags);
1198      FreeTemp(tmp);
1199    }
1200  }
1201  return call_state;
1202}
1203
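// Pick the location an inlined intrinsic should write its result to: the invoke's own result
// location if it has one, otherwise the standard return location.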
1204RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
1205  RegLocation res;
1206  if (info->result.location == kLocInvalid) {
1207    res = GetReturn(LocToRegClass(info->result));
1208  } else {
1209    res = info->result;
1210  }
1211  return res;
1212}
1213
1214RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
1215  RegLocation res;
1216  if (info->result.location == kLocInvalid) {
1217    res = GetReturnWide(kCoreReg);
1218  } else {
1219    res = info->result;
1220  }
1221  return res;
1222}
1223
1224bool Mir2Lir::GenInlinedGet(CallInfo* info) {
1225  if (cu_->instruction_set == kMips) {
1226    // TODO - add Mips implementation
1227    return false;
1228  }
1229
1230  // The Reference class is stored in the image dex file, which might not be the same as the cu's
1231  // dex file. Query the Reference class via the image dex file, then reset to the original dex
1232  // file after loading the class type.
1233  uint16_t type_idx = 0;
1234  const DexFile* ref_dex_file = nullptr;
1235  {
1236    ScopedObjectAccess soa(Thread::Current());
1237    type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
1238    ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
1239  }
1240  CHECK(LIKELY(ref_dex_file != nullptr));
1241
1242  // The address is either static within the image file, or needs to be patched up after compilation.
1243  bool unused_type_initialized;
1244  bool use_direct_type_ptr;
1245  uintptr_t direct_type_ptr;
1246  bool is_finalizable;
1247  const DexFile* old_dex = cu_->dex_file;
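  // CanEmbedTypeInCode() and LoadClassType() consult cu_->dex_file, so temporarily point it at the
  // dex file that defines java.lang.ref.Reference and restore it on every exit path.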
1248  cu_->dex_file = ref_dex_file;
1249  RegStorage reg_class = TargetPtrReg(kArg1);
1250  if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
1251                                                &use_direct_type_ptr, &direct_type_ptr,
1252                                                &is_finalizable) || is_finalizable) {
1253    cu_->dex_file = old_dex;
1254    // The address is not known and a post-compile patch is not possible; cannot inline the intrinsic.
1255    return false;
1256  }
1257  if (use_direct_type_ptr) {
1258    LoadConstant(reg_class, direct_type_ptr);
1259  } else {
1260    LoadClassType(type_idx, kArg1);
1261  }
1262  cu_->dex_file = old_dex;
1263
1264  // Get the offsets of the slow-path and disable-intrinsic flags in the Reference class.
1265  uint32_t slow_path_flag_offset = 0;
1266  uint32_t disable_flag_offset = 0;
1267  {
1268    ScopedObjectAccess soa(Thread::Current());
1269    mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
1270    slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
1271    disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
1272  }
1273  CHECK(slow_path_flag_offset && disable_flag_offset &&
1274        (slow_path_flag_offset != disable_flag_offset));
1275
1276  // Intrinsic logic starts here.
1277  RegLocation rl_obj = info->args[0];
1278  rl_obj = LoadValue(rl_obj);
1279
1280  RegStorage reg_slow_path = AllocTemp();
1281  RegStorage reg_disabled = AllocTemp();
1282  Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
1283  Load32Disp(reg_class, disable_flag_offset, reg_disabled);
1284  OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
1285  FreeTemp(reg_disabled);
1286
1287  // if slow path, jump to JNI path target
1288  LIR* slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
1289  FreeTemp(reg_slow_path);
1290
1291  // slow path not enabled, simply load the referent of the reference object
1292  RegLocation rl_dest = InlineTarget(info);
1293  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1294  GenNullCheck(rl_obj.reg, info->opt_flags);
1295  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
1296      kNotVolatile);
1297  MarkPossibleNullPointerException(info->opt_flags);
1298  StoreValue(rl_dest, rl_result);
1299
1300  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
1301  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
1302
1303  return true;
1304}
1305
1306bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
1307  if (cu_->instruction_set == kMips) {
1308    // TODO - add Mips implementation
1309    return false;
1310  }
1311  // Location of reference to data array
1312  int value_offset = mirror::String::ValueOffset().Int32Value();
1313  // Location of count
1314  int count_offset = mirror::String::CountOffset().Int32Value();
1315  // Starting offset within data array
1316  int offset_offset = mirror::String::OffsetOffset().Int32Value();
1317  // Start of char data within the array object
1318  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
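  // String.charAt(i): range-check i against the count field, then load an unsigned 16-bit halfword
  // at index (offset + i) of the value array.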
1319
1320  RegLocation rl_obj = info->args[0];
1321  RegLocation rl_idx = info->args[1];
1322  rl_obj = LoadValue(rl_obj, kRefReg);
1323  // X86 wants to avoid putting a constant index into a register.
1324  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
1325    rl_idx = LoadValue(rl_idx, kCoreReg);
1326  }
1327  RegStorage reg_max;
1328  GenNullCheck(rl_obj.reg, info->opt_flags);
1329  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1330  LIR* range_check_branch = nullptr;
1331  RegStorage reg_off;
1332  RegStorage reg_ptr;
1333  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1334    reg_off = AllocTemp();
1335    reg_ptr = AllocTempRef();
1336    if (range_check) {
1337      reg_max = AllocTemp();
1338      Load32Disp(rl_obj.reg, count_offset, reg_max);
1339      MarkPossibleNullPointerException(info->opt_flags);
1340    }
1341    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1342    MarkPossibleNullPointerException(info->opt_flags);
1343    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1344    if (range_check) {
1345      // Set up a slow path to allow retry in case of bounds violation.
1346      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1347      FreeTemp(reg_max);
1348      range_check_branch = OpCondBranch(kCondUge, nullptr);
1349    }
1350    OpRegImm(kOpAdd, reg_ptr, data_offset);
1351  } else {
1352    if (range_check) {
1353      // On x86, we can compare to memory directly
1354      // Set up a slow path to allow retry in case of bounds violation.
1355      if (rl_idx.is_const) {
1356        range_check_branch = OpCmpMemImmBranch(
1357            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
1358            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
1359      } else {
1360        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
1361        range_check_branch = OpCondBranch(kCondUge, nullptr);
1362      }
1363    }
1364    reg_off = AllocTemp();
1365    reg_ptr = AllocTempRef();
1366    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1367    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1368  }
1369  if (rl_idx.is_const) {
1370    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1371  } else {
1372    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1373  }
1374  FreeTemp(rl_obj.reg);
1375  if (rl_idx.location == kLocPhysReg) {
1376    FreeTemp(rl_idx.reg);
1377  }
1378  RegLocation rl_dest = InlineTarget(info);
1379  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1380  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1381    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1382  } else {
1383    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
1384  }
1385  FreeTemp(reg_off);
1386  FreeTemp(reg_ptr);
1387  StoreValue(rl_dest, rl_result);
1388  if (range_check) {
1389    DCHECK(range_check_branch != nullptr);
1390    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1391    AddIntrinsicSlowPath(info, range_check_branch);
1392  }
1393  return true;
1394}
1395
1396// Generates an inlined String.isEmpty() or String.length().
1397bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1398  if (cu_->instruction_set == kMips) {
1399    // TODO - add Mips implementation
1400    return false;
1401  }
1402  // dst = src.length();
1403  RegLocation rl_obj = info->args[0];
1404  rl_obj = LoadValue(rl_obj, kRefReg);
1405  RegLocation rl_dest = InlineTarget(info);
1406  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1407  GenNullCheck(rl_obj.reg, info->opt_flags);
1408  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1409  MarkPossibleNullPointerException(info->opt_flags);
1410  if (is_empty) {
1411    // dst = (dst == 0);
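    // Thumb2: the negate is expected to set the carry flag (carry == 1 iff dst was 0), and the adc
    // then yields dst + (-dst) + carry == carry.  Other ISAs: (dst - 1) has bit 31 set only when
    // dst was 0, so an unsigned shift right by 31 produces the boolean result.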
1412    if (cu_->instruction_set == kThumb2) {
1413      RegStorage t_reg = AllocTemp();
1414      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1415      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1416    } else if (cu_->instruction_set == kArm64) {
1417      OpRegImm(kOpSub, rl_result.reg, 1);
1418      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1419    } else {
1420      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1421      OpRegImm(kOpSub, rl_result.reg, 1);
1422      OpRegImm(kOpLsr, rl_result.reg, 31);
1423    }
1424  }
1425  StoreValue(rl_dest, rl_result);
1426  return true;
1427}
1428
1429bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1430  if (cu_->instruction_set == kMips) {
1431    // TODO - add Mips implementation.
1432    return false;
1433  }
1434  RegLocation rl_src_i = info->args[0];
1435  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1436  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1437  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1438  if (size == k64) {
1439    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1440      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1441      StoreValueWide(rl_dest, rl_result);
1442      return true;
1443    }
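    // 32-bit targets: a 64-bit byte swap is two 32-bit REVs with the halves crossed:
    // result.lo = bswap32(src.hi) and result.hi = bswap32(src.lo).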
1444    RegStorage r_i_low = rl_i.reg.GetLow();
1445    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1446      // The first REV would clobber rl_result.reg.GetLowReg(); save the value in a temp for the second REV.
1447      r_i_low = AllocTemp();
1448      OpRegCopy(r_i_low, rl_i.reg);
1449    }
1450    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1451    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1452    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1453      FreeTemp(r_i_low);
1454    }
1455    StoreValueWide(rl_dest, rl_result);
1456  } else {
1457    DCHECK(size == k32 || size == kSignedHalf);
1458    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1459    OpRegReg(op, rl_result.reg, rl_i.reg);
1460    StoreValue(rl_dest, rl_result);
1461  }
1462  return true;
1463}
1464
1465bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1466  if (cu_->instruction_set == kMips) {
1467    // TODO - add Mips implementation
1468    return false;
1469  }
1470  RegLocation rl_src = info->args[0];
1471  rl_src = LoadValue(rl_src, kCoreReg);
1472  RegLocation rl_dest = InlineTarget(info);
1473  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1474  RegStorage sign_reg = AllocTemp();
1475  // abs(x): let y = x >> 31 (arithmetic shift); then abs(x) = (x + y) ^ y.
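  // e.g. x = -5: y = -1 (all ones); x + y = -6; -6 ^ -1 = 5.  For x >= 0, y = 0 and x is unchanged.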
1476  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1477  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1478  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1479  StoreValue(rl_dest, rl_result);
1480  return true;
1481}
1482
1483bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1484  if (cu_->instruction_set == kMips) {
1485    // TODO - add Mips implementation
1486    return false;
1487  }
1488  RegLocation rl_src = info->args[0];
1489  rl_src = LoadValueWide(rl_src, kCoreReg);
1490  RegLocation rl_dest = InlineTargetWide(info);
1491  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1492
1493  // If on x86 or if we would clobber a register needed later, just copy the source first.
1494  if (cu_->instruction_set != kX86_64 &&
1495      (cu_->instruction_set == kX86 ||
1496       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1497    OpRegCopyWide(rl_result.reg, rl_src.reg);
1498    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1499        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1500        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1501        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1502      // Reuse source registers to avoid running out of temps.
1503      FreeTemp(rl_src.reg);
1504    }
1505    rl_src = rl_result;
1506  }
1507
1508  // abs(x): let y be the sign of x spread across the word (x >> 63 arithmetically); then abs(x) = (x + y) ^ y.
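  // On 32-bit targets the sign word is applied to both halves: add/adc propagates the carry from
  // the low half into the high half, then both halves are xor'ed with the sign word.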
1509  RegStorage sign_reg;
1510  if (cu_->instruction_set == kX86_64) {
1511    sign_reg = AllocTempWide();
1512    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1513    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1514    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1515  } else {
1516    sign_reg = AllocTemp();
1517    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1518    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1519    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1520    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1521    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1522  }
1523  FreeTemp(sign_reg);
1524  StoreValueWide(rl_dest, rl_result);
1525  return true;
1526}
1527
1528bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
1529  if (cu_->instruction_set == kMips) {
1530    // TODO - add Mips implementation
1531    return false;
1532  }
1533  RegLocation rl_src = info->args[0];
1534  rl_src = LoadValue(rl_src, kCoreReg);
1535  RegLocation rl_dest = InlineTarget(info);
1536  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
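  // |x| for an IEEE-754 single is its bit pattern with the sign bit (bit 31) cleared.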
1537  OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
1538  StoreValue(rl_dest, rl_result);
1539  return true;
1540}
1541
1542bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1543  // Currently implemented only for ARM64
1544  return false;
1545}
1546
1547bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1548  // Currently implemented only for ARM64
1549  return false;
1550}
1551
1552bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
1553  if (cu_->instruction_set == kMips) {
1554    // TODO - add Mips implementation
1555    return false;
1556  }
1557  RegLocation rl_src = info->args[0];
1558  rl_src = LoadValueWide(rl_src, kCoreReg);
1559  RegLocation rl_dest = InlineTargetWide(info);
1560  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1561
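  // For an IEEE-754 double the sign bit is bit 63, i.e. bit 31 of the high word; copy the pair and
  // clear just that bit.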
1562  OpRegCopyWide(rl_result.reg, rl_src.reg);
1563  OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
1564  StoreValueWide(rl_dest, rl_result);
1565  return true;
1566}
1567
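// The float 'conversions' inlined here (the raw float<->int bit-pattern reinterpretations, e.g.
// Float.floatToRawIntBits()) are pure moves, so the value is simply forwarded to the result.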
1568bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1569  if (cu_->instruction_set == kMips) {
1570    // TODO - add Mips implementation
1571    return false;
1572  }
1573  RegLocation rl_src = info->args[0];
1574  RegLocation rl_dest = InlineTarget(info);
1575  StoreValue(rl_dest, rl_src);
1576  return true;
1577}
1578
1579bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1580  if (cu_->instruction_set == kMips) {
1581    // TODO - add Mips implementation
1582    return false;
1583  }
1584  RegLocation rl_src = info->args[0];
1585  RegLocation rl_dest = InlineTargetWide(info);
1586  StoreValueWide(rl_dest, rl_src);
1587  return true;
1588}
1589
1590bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1591  return false;
1592}
1593
1594
1595/*
1596 * Fast String.indexOf(I) & (II).  Tests for simple case of char <= 0xFFFF,
1597 * otherwise bails to standard library code.
1598 */
1599bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1600  if (cu_->instruction_set == kMips) {
1601    // TODO - add Mips implementation
1602    return false;
1603  }
1604  if (cu_->instruction_set == kX86_64) {
1605    // TODO - add kX86_64 implementation
1606    return false;
1607  }
1608  RegLocation rl_obj = info->args[0];
1609  RegLocation rl_char = info->args[1];
1610  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1611    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1612    return false;
1613  }
1614
1615  ClobberCallerSave();
1616  LockCallTemps();  // Using fixed registers
1617  RegStorage reg_ptr = TargetReg(kArg0, kRef);
1618  RegStorage reg_char = TargetReg(kArg1, kNotWide);
1619  RegStorage reg_start = TargetReg(kArg2, kNotWide);
1620
1621  LoadValueDirectFixed(rl_obj, reg_ptr);
1622  LoadValueDirectFixed(rl_char, reg_char);
1623  if (zero_based) {
1624    LoadConstant(reg_start, 0);
1625  } else {
1626    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1627    LoadValueDirectFixed(rl_start, reg_start);
1628  }
1629  RegStorage r_tgt = cu_->target64 ?
1630      LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
1631      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
1632  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1633  LIR* high_code_point_branch =
1634      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1635  // NOTE: not a safepoint
1636  OpReg(kOpBlx, r_tgt);
1637  if (!rl_char.is_const) {
1638    // Add the slow path for code points beyond 0xFFFF.
1639    DCHECK(high_code_point_branch != nullptr);
1640    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1641    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1642    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1643  } else {
1644    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1645    DCHECK(high_code_point_branch == nullptr);
1646  }
1647  RegLocation rl_return = GetReturn(kCoreReg);
1648  RegLocation rl_dest = InlineTarget(info);
1649  StoreValue(rl_dest, rl_return);
1650  return true;
1651}
1652
1653/* Fast String.compareTo(Ljava/lang/String;)I. */
1654bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1655  if (cu_->instruction_set == kMips) {
1656    // TODO - add Mips implementation
1657    return false;
1658  }
1659  ClobberCallerSave();
1660  LockCallTemps();  // Using fixed registers
1661  RegStorage reg_this = TargetReg(kArg0, kRef);
1662  RegStorage reg_cmp = TargetReg(kArg1, kRef);
1663
1664  RegLocation rl_this = info->args[0];
1665  RegLocation rl_cmp = info->args[1];
1666  LoadValueDirectFixed(rl_this, reg_this);
1667  LoadValueDirectFixed(rl_cmp, reg_cmp);
1668  RegStorage r_tgt;
1669  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1670    if (cu_->target64) {
1671      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1672    } else {
1673      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1674    }
1675  } else {
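    // x86/x86-64 call the entrypoint straight out of thread memory (see OpThreadMem below), so no
    // target register needs to be materialized.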
1676    r_tgt = RegStorage::InvalidReg();
1677  }
1678  GenExplicitNullCheck(reg_this, info->opt_flags);
1679  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1680  // TUNING: check if rl_cmp.s_reg_low is already null checked
1681  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1682  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1683  // NOTE: not a safepoint
1684  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1685    OpReg(kOpBlx, r_tgt);
1686  } else {
1687    if (cu_->target64) {
1688      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1689    } else {
1690      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1691    }
1692  }
1693  RegLocation rl_return = GetReturn(kCoreReg);
1694  RegLocation rl_dest = InlineTarget(info);
1695  StoreValue(rl_dest, rl_return);
1696  return true;
1697}
1698
1699bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1700  RegLocation rl_dest = InlineTarget(info);
1701
1702  // Early exit if the result is unused.
1703  if (rl_dest.orig_sreg < 0) {
1704    return true;
1705  }
1706
1707  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1708
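  // The managed Thread peer is a field of the runtime Thread object; load it relative to the self
  // pointer (kSelf) where one is available, or through thread-local memory on x86/x86-64.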
1709  switch (cu_->instruction_set) {
1710    case kArm:
1711      // Fall-through.
1712    case kThumb2:
1713      // Fall-through.
1714    case kMips:
1715      Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1716      break;
1717
1718    case kArm64:
1719      LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1720                  kNotVolatile);
1721      break;
1722
1723    case kX86:
1724      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1725                                                          Thread::PeerOffset<4>());
1726      break;
1727
1728    case kX86_64:
1729      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1730                                                          Thread::PeerOffset<8>());
1731      break;
1732
1733    default:
1734      LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
1735  }
1736  StoreValue(rl_dest, rl_result);
1737  return true;
1738}
1739
1740bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1741                                  bool is_long, bool is_volatile) {
1742  if (cu_->instruction_set == kMips) {
1743    // TODO - add Mips implementation
1744    return false;
1745  }
1746  // Unused - RegLocation rl_src_unsafe = info->args[0];
1747  RegLocation rl_src_obj = info->args[1];  // Object
1748  RegLocation rl_src_offset = info->args[2];  // long low
1749  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1750  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1751
1752  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1753  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1754  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
1755  if (is_long) {
1756    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1757        || cu_->instruction_set == kArm64) {
1758      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1759    } else {
1760      RegStorage rl_temp_offset = AllocTemp();
1761      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1762      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1763      FreeTemp(rl_temp_offset);
1764    }
1765  } else {
1766    if (rl_result.ref) {
1767      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1768    } else {
1769      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1770    }
1771  }
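  // A load-any barrier after the load provides the acquire half of the volatile-get contract.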
1772
1773  if (is_volatile) {
1774    GenMemBarrier(kLoadAny);
1775  }
1776
1777  if (is_long) {
1778    StoreValueWide(rl_dest, rl_result);
1779  } else {
1780    StoreValue(rl_dest, rl_result);
1781  }
1782  return true;
1783}
1784
1785bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1786                                  bool is_object, bool is_volatile, bool is_ordered) {
1787  if (cu_->instruction_set == kMips) {
1788    // TODO - add Mips implementation
1789    return false;
1790  }
1791  // Unused - RegLocation rl_src_unsafe = info->args[0];
1792  RegLocation rl_src_obj = info->args[1];  // Object
1793  RegLocation rl_src_offset = info->args[2];  // long low
1794  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1795  RegLocation rl_src_value = info->args[4];  // value to store
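  // Volatile and ordered puts need a release-style (any-store) barrier ahead of the store.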
1796  if (is_volatile || is_ordered) {
1797    GenMemBarrier(kAnyStore);
1798  }
1799  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1800  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1801  RegLocation rl_value;
1802  if (is_long) {
1803    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1804    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1805        || cu_->instruction_set == kArm64) {
1806      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1807    } else {
1808      RegStorage rl_temp_offset = AllocTemp();
1809      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1810      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1811      FreeTemp(rl_temp_offset);
1812    }
1813  } else {
1814    rl_value = LoadValue(rl_src_value);
1815    if (rl_value.ref) {
1816      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1817    } else {
1818      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1819    }
1820  }
1821
1822  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1823  FreeTemp(rl_offset.reg);
1824
1825  if (is_volatile) {
1826    // Prevent reordering with a subsequent volatile load.
1827    // May also be needed to address store atomicity issues.
1828    GenMemBarrier(kAnyAny);
1829  }
1830  if (is_object) {
1831    MarkGCCard(rl_value.reg, rl_object.reg);
1832  }
1833  return true;
1834}
1835
1836void Mir2Lir::GenInvoke(CallInfo* info) {
1837  if ((info->opt_flags & MIR_INLINED) != 0) {
1838    // Already inlined but we may still need the null check.
1839    if (info->type != kStatic &&
1840        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
1841         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0))  {
1842      RegLocation rl_obj = LoadValue(info->args[0], kRefReg);
1843      GenNullCheck(rl_obj.reg);
1844    }
1845    return;
1846  }
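  // Try to expand the call as an intrinsic; fall back to the full invoke sequence if the inliner
  // declines.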
1847  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1848  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1849      ->GenIntrinsic(this, info)) {
1850    return;
1851  }
1852  GenInvokeNoInline(info);
1853}
1854
1855template <size_t pointer_size>
1856static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
1857  ThreadOffset<pointer_size> trampoline(-1);
1858  switch (type) {
1859    case kInterface:
1860      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeInterfaceTrampolineWithAccessCheck);
1861      break;
1862    case kDirect:
1863      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeDirectTrampolineWithAccessCheck);
1864      break;
1865    case kStatic:
1866      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeStaticTrampolineWithAccessCheck);
1867      break;
1868    case kSuper:
1869      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeSuperTrampolineWithAccessCheck);
1870      break;
1871    case kVirtual:
1872      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeVirtualTrampolineWithAccessCheck);
1873      break;
1874    default:
1875      LOG(FATAL) << "Unexpected invoke type";
1876  }
1877  return mir_to_lir->OpThreadMem(kOpBlx, trampoline);
1878}
1879
1880void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1881  int call_state = 0;
1882  LIR* null_ck;
1883  LIR** p_null_ck = nullptr;
1884  NextCallInsn next_call_insn;
1885  FlushAllRegs();  /* Everything to home location */
1886  // Explicit register usage
1887  LockCallTemps();
1888
1889  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1890  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1891  BeginInvoke(info);
1892  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1893  info->type = static_cast<InvokeType>(method_info.GetSharpType());
1894  bool fast_path = method_info.FastPath();
1895  bool skip_this;
1896  if (info->type == kInterface) {
1897    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1898    skip_this = fast_path;
1899  } else if (info->type == kDirect) {
1900    if (fast_path) {
1901      p_null_ck = &null_ck;
1902    }
1903    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1904    skip_this = false;
1905  } else if (info->type == kStatic) {
1906    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1907    skip_this = false;
1908  } else if (info->type == kSuper) {
1909    DCHECK(!fast_path);  // Fast path is a direct call.
1910    next_call_insn = NextSuperCallInsnSP;
1911    skip_this = false;
1912  } else {
1913    DCHECK_EQ(info->type, kVirtual);
1914    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1915    skip_this = fast_path;
1916  }
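  // On the fast paths the call-sequence instructions load the 'this' reference themselves, so
  // regular argument loading skips it (skip_this).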
1917  MethodReference target_method = method_info.GetTargetMethod();
1918  if (!info->is_range) {
1919    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1920                                      next_call_insn, target_method, method_info.VTableIndex(),
1921                                      method_info.DirectCode(), method_info.DirectMethod(),
1922                                      original_type, skip_this);
1923  } else {
1924    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1925                                    next_call_insn, target_method, method_info.VTableIndex(),
1926                                    method_info.DirectCode(), method_info.DirectMethod(),
1927                                    original_type, skip_this);
1928  }
1929  // Finish up any part of the call sequence not interleaved with arg loading.
1930  while (call_state >= 0) {
1931    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1932                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
1933  }
1934  LIR* call_inst;
1935  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1936    call_inst = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
1937  } else {
1938    if (fast_path) {
1939      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
1940        // We can have the linker fixup a call relative.
1941        call_inst =
1942          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
1943      } else {
1944        call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef),
1945                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
1946      }
1947    } else {
1948      // TODO: Extract?
1949      if (cu_->target64) {
1950        call_inst = GenInvokeNoInlineCall<8>(this, info->type);
1951      } else {
1952        call_inst = GenInvokeNoInlineCall<4>(this, info->type);
1953      }
1954    }
1955  }
1956  EndInvoke(info);
1957  MarkSafepointPC(call_inst);
1958
1959  ClobberCallerSave();
1960  if (info->result.location != kLocInvalid) {
1961    // We have a following MOVE_RESULT - do it now.
1962    if (info->result.wide) {
1963      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1964      StoreValueWide(info->result, ret_loc);
1965    } else {
1966      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1967      StoreValue(info->result, ret_loc);
1968    }
1969  }
1970}
1971
1972}  // namespace art
1973