gen_invoke.cc revision 69dfe51b684dd9d510dbcb63295fe180f998efde
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/class-inl.h"
26#include "mirror/dex_cache.h"
27#include "mirror/object_array-inl.h"
28#include "mirror/reference-inl.h"
29#include "mirror/string.h"
30#include "mir_to_lir-inl.h"
31#include "scoped_thread_state_change.h"
32#include "x86/codegen_x86.h"
33
34namespace art {
35
36// Shortcuts to repeatedly used long types.
37typedef mirror::ObjectArray<mirror::Object> ObjArray;
38
39/*
40 * This source file contains "gen" codegen routines that should
41 * be applicable to most targets.  Only mid-level support utilities
42 * and "op" calls may be used here.
43 */
44
45void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
46  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
47   public:
48    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
49        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
50    }
51
52    void Compile() {
53      m2l_->ResetRegPool();
54      m2l_->ResetDefTracking();
55      GenerateTargetLabel(kPseudoIntrinsicRetry);
56      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
57      m2l_->GenInvokeNoInline(info_);
58      if (cont_ != nullptr) {
59        m2l_->OpUnconditionalBranch(cont_);
60      }
61    }
62
63   private:
64    CallInfo* const info_;
65  };
66
67  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
68}
69
70// Macro to help instantiate.
71// TODO: This might be used to only instantiate <4> on pure 32b systems.
72#define INSTANTIATE(sig_part1, ...) \
73  template sig_part1(ThreadOffset<4>, __VA_ARGS__); \
74  template sig_part1(ThreadOffset<8>, __VA_ARGS__); \
75
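// For illustration, INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
// expands to the two explicit instantiations:
//   template void Mir2Lir::CallRuntimeHelper(ThreadOffset<4>, bool safepoint_pc);
//   template void Mir2Lir::CallRuntimeHelper(ThreadOffset<8>, bool safepoint_pc);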
76
77/*
78 * To save scheduling time, helper calls are broken into two parts: generation of
79 * the helper target address, and the actual call to the helper.  Because x86
80 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
81 * load arguments between the two parts.
82 */
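// Illustrative shape of a one-argument helper call (cf. CallRuntimeHelperImm below):
//   RegStorage r_tgt = CallHelperSetup(helper_offset);  // part 1: load target (no-op on x86)
//   LoadConstant(TargetReg(kArg0, kNotWide), arg0);     // marshal arguments between the parts
//   ClobberCallerSave();
//   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);  // part 2: the call itself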
83// template <size_t pointer_size>
84RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
85  // All CallRuntimeHelperXXX call this first. So make a central check here.
86  DCHECK_EQ(4U, GetInstructionSetPointerSize(cu_->instruction_set));
87
88  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
89    return RegStorage::InvalidReg();
90  } else {
91    return LoadHelper(helper_offset);
92  }
93}
94
95RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<8> helper_offset) {
96  // All CallRuntimeHelperXXX call this first. So make a central check here.
97  DCHECK_EQ(8U, GetInstructionSetPointerSize(cu_->instruction_set));
98
99  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
100    return RegStorage::InvalidReg();
101  } else {
102    return LoadHelper(helper_offset);
103  }
104}
105
106/* NOTE: if r_tgt is a temp, it will be freed following use */
107template <size_t pointer_size>
108LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset,
109                         bool safepoint_pc, bool use_link) {
110  LIR* call_inst;
111  OpKind op = use_link ? kOpBlx : kOpBx;
112  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
113    call_inst = OpThreadMem(op, helper_offset);
114  } else {
115    call_inst = OpReg(op, r_tgt);
116    FreeTemp(r_tgt);
117  }
118  if (safepoint_pc) {
119    MarkSafepointPC(call_inst);
120  }
121  return call_inst;
122}
123template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset,
124                                        bool safepoint_pc, bool use_link);
125template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<8> helper_offset,
126                                        bool safepoint_pc, bool use_link);
127
128template <size_t pointer_size>
129void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc) {
130  RegStorage r_tgt = CallHelperSetup(helper_offset);
131  ClobberCallerSave();
132  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
133}
134INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
135
136template <size_t pointer_size>
137void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
138                                   bool safepoint_pc) {
139  RegStorage r_tgt = CallHelperSetup(helper_offset);
140  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
141  ClobberCallerSave();
142  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
143}
144INSTANTIATE(void Mir2Lir::CallRuntimeHelperImm, int arg0, bool safepoint_pc)
145
146template <size_t pointer_size>
147void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
148                                   bool safepoint_pc) {
149  RegStorage r_tgt = CallHelperSetup(helper_offset);
150  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
151  ClobberCallerSave();
152  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
153}
154INSTANTIATE(void Mir2Lir::CallRuntimeHelperReg, RegStorage arg0, bool safepoint_pc)
155
156template <size_t pointer_size>
157void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset,
158                                           RegLocation arg0, bool safepoint_pc) {
159  RegStorage r_tgt = CallHelperSetup(helper_offset);
160  if (arg0.wide == 0) {
161    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
162  } else {
163    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
164  }
165  ClobberCallerSave();
166  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
167}
168INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocation, RegLocation arg0, bool safepoint_pc)
169
170template <size_t pointer_size>
171void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
172                                      bool safepoint_pc) {
173  RegStorage r_tgt = CallHelperSetup(helper_offset);
174  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
175  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
176  ClobberCallerSave();
177  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
178}
179INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmImm, int arg0, int arg1, bool safepoint_pc)
180
181template <size_t pointer_size>
182void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
183                                              RegLocation arg1, bool safepoint_pc) {
184  RegStorage r_tgt = CallHelperSetup(helper_offset);
185  DCHECK(!arg1.fp);
186  if (arg1.wide == 0) {
187    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
188  } else {
189    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
190    LoadValueDirectWideFixed(arg1, r_tmp);
191  }
192  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
193  ClobberCallerSave();
194  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
195}
196INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocation, int arg0, RegLocation arg1,
197            bool safepoint_pc)
198
199template <size_t pointer_size>
200void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
201                                              RegLocation arg0, int arg1, bool safepoint_pc) {
202  RegStorage r_tgt = CallHelperSetup(helper_offset);
203  DCHECK(!arg0.wide);
204  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
205  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
206  ClobberCallerSave();
207  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
208}
209INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationImm, RegLocation arg0, int arg1,
210            bool safepoint_pc)
211
212template <size_t pointer_size>
213void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
214                                      RegStorage arg1, bool safepoint_pc) {
215  RegStorage r_tgt = CallHelperSetup(helper_offset);
216  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
217  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
218  ClobberCallerSave();
219  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
220}
221INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmReg, int arg0, RegStorage arg1, bool safepoint_pc)
222
223template <size_t pointer_size>
224void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
225                                      int arg1, bool safepoint_pc) {
226  RegStorage r_tgt = CallHelperSetup(helper_offset);
227  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
228  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
229  ClobberCallerSave();
230  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
231}
232INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegImm, RegStorage arg0, int arg1, bool safepoint_pc)
233
234template <size_t pointer_size>
235void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
236                                         bool safepoint_pc) {
237  RegStorage r_tgt = CallHelperSetup(helper_offset);
238  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
239  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
240  ClobberCallerSave();
241  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
242}
243INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethod, int arg0, bool safepoint_pc)
244
245template <size_t pointer_size>
246void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
247                                         bool safepoint_pc) {
248  RegStorage r_tgt = CallHelperSetup(helper_offset);
249  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
250  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
251  if (r_tmp.NotExactlyEquals(arg0)) {
252    OpRegCopy(r_tmp, arg0);
253  }
254  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
255  ClobberCallerSave();
256  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
257}
258INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethod, RegStorage arg0, bool safepoint_pc)
259
260template <size_t pointer_size>
261void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
262                                                    RegStorage arg0, RegLocation arg2,
263                                                    bool safepoint_pc) {
264  RegStorage r_tgt = CallHelperSetup(helper_offset);
265  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
266  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
267  if (r_tmp.NotExactlyEquals(arg0)) {
268    OpRegCopy(r_tmp, arg0);
269  }
270  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
271  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
272  ClobberCallerSave();
273  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
274}
275INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethodRegLocation, RegStorage arg0, RegLocation arg2,
276            bool safepoint_pc)
277
278template <size_t pointer_size>
279void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
280                                                      RegLocation arg0, RegLocation arg1,
281                                                      bool safepoint_pc) {
282  RegStorage r_tgt = CallHelperSetup(helper_offset);
283  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
284    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
285
286    RegStorage arg1_reg;
287    if (arg1.fp == arg0.fp) {
288      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
289    } else {
290      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
291    }
292
293    if (arg0.wide == 0) {
294      LoadValueDirectFixed(arg0, arg0_reg);
295    } else {
296      LoadValueDirectWideFixed(arg0, arg0_reg);
297    }
298
299    if (arg1.wide == 0) {
300      LoadValueDirectFixed(arg1, arg1_reg);
301    } else {
302      LoadValueDirectWideFixed(arg1, arg1_reg);
303    }
304  } else {
305    DCHECK(!cu_->target64);
306    if (arg0.wide == 0) {
307      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
308      if (arg1.wide == 0) {
309        if (cu_->instruction_set == kMips) {
310          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
311        } else {
312          LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide));
313        }
314      } else {
315        if (cu_->instruction_set == kMips) {
316          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
317        } else {
318          LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide));
319        }
320      }
321    } else {
322      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
323      if (arg1.wide == 0) {
324        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
325      } else {
326        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
327      }
328    }
329  }
330  ClobberCallerSave();
331  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
332}
333INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
334            RegLocation arg1, bool safepoint_pc)
335
336void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
337  WideKind arg0_kind = arg0.GetWideKind();
338  WideKind arg1_kind = arg1.GetWideKind();
339  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
340    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
341      // Swap kArg0 and kArg1 with kArg2 as temp.
342      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
343      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
344      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
345    } else {
346      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
347      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
348    }
349  } else {
350    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
351    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
352  }
353}
354
355template <size_t pointer_size>
356void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
357                                      RegStorage arg1, bool safepoint_pc) {
358  RegStorage r_tgt = CallHelperSetup(helper_offset);
359  CopyToArgumentRegs(arg0, arg1);
360  ClobberCallerSave();
361  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
362}
363INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegReg, RegStorage arg0, RegStorage arg1,
364            bool safepoint_pc)
365
366template <size_t pointer_size>
367void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
368                                         RegStorage arg1, int arg2, bool safepoint_pc) {
369  RegStorage r_tgt = CallHelperSetup(helper_offset);
370  CopyToArgumentRegs(arg0, arg1);
371  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
372  ClobberCallerSave();
373  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
374}
375INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegRegImm, RegStorage arg0, RegStorage arg1, int arg2,
376            bool safepoint_pc)
377
378template <size_t pointer_size>
379void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
380                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
381  RegStorage r_tgt = CallHelperSetup(helper_offset);
382  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
383  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
384  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
385  ClobberCallerSave();
386  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
387}
388INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodRegLocation, int arg0, RegLocation arg2,
389            bool safepoint_pc)
390
391template <size_t pointer_size>
392void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
393                                            int arg2, bool safepoint_pc) {
394  RegStorage r_tgt = CallHelperSetup(helper_offset);
395  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
396  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
397  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
398  ClobberCallerSave();
399  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
400}
401INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodImm, int arg0, int arg2, bool safepoint_pc)
402
403template <size_t pointer_size>
404void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
405                                                         int arg0, RegLocation arg1,
406                                                         RegLocation arg2, bool safepoint_pc) {
407  RegStorage r_tgt = CallHelperSetup(helper_offset);
408  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
409                                                        // instantiation bug in GCC.
410  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
411  if (arg2.wide == 0) {
412    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
413  } else {
414    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
415  }
416  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
417  ClobberCallerSave();
418  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
419}
420INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg1,
421            RegLocation arg2, bool safepoint_pc)
422
423template <size_t pointer_size>
424void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
425    ThreadOffset<pointer_size> helper_offset,
426    RegLocation arg0,
427    RegLocation arg1,
428    RegLocation arg2,
429    bool safepoint_pc) {
430  RegStorage r_tgt = CallHelperSetup(helper_offset);
431  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
432  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
433  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
434  ClobberCallerSave();
435  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
436}
437INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation, RegLocation arg0,
438            RegLocation arg1, RegLocation arg2, bool safepoint_pc)
439
440/*
441 * If there are any ins passed in registers that have not been promoted
442 * to a callee-save register, flush them to the frame.  Perform initial
443 * assignment of promoted arguments.
444 *
445 * ArgLocs is an array of location records describing the incoming arguments
446 * with one location record per word of argument.
447 */
448void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
449  /*
450   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
451   * It will attempt to keep kArg0 live (or copy it to home location
452   * if promoted).
453   */
454  RegLocation rl_src = rl_method;
455  rl_src.location = kLocPhysReg;
456  rl_src.reg = TargetReg(kArg0, kRef);
457  rl_src.home = false;
458  MarkLive(rl_src);
459  StoreValue(rl_method, rl_src);
460  // If Method* has been promoted, explicitly flush
461  if (rl_method.location == kLocPhysReg) {
462    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
463  }
464
465  if (cu_->num_ins == 0) {
466    return;
467  }
468
469  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
470  /*
471   * Copy incoming arguments to their proper home locations.
472   * NOTE: an older version of dx had an issue in which
473   * it would reuse static method argument registers.
474   * This could result in the same Dalvik virtual register
475   * being promoted to both core and fp regs. To account for this,
476   * we only copy to the corresponding promoted physical register
477   * if it matches the type of the SSA name for the incoming
478   * argument.  It is also possible that long and double arguments
479   * end up half-promoted.  In those cases, we must flush the promoted
480   * half to memory as well.
481   */
482  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
483  for (int i = 0; i < cu_->num_ins; i++) {
484    PromotionMap* v_map = &promotion_map_[start_vreg + i];
485    RegStorage reg = GetArgMappingToPhysicalReg(i);
486
487    if (reg.Valid()) {
488      // If arriving in register
489      bool need_flush = true;
490      RegLocation* t_loc = &ArgLocs[i];
491      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
492        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
493        need_flush = false;
494      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
495        OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
496        need_flush = false;
497      } else {
498        need_flush = true;
499      }
500
501      // For wide args, force flush if not fully promoted
502      if (t_loc->wide) {
503        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
504        // Is only half promoted?
505        need_flush |= (p_map->core_location != v_map->core_location) ||
506            (p_map->fp_location != v_map->fp_location);
507        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
508          /*
509           * In Arm, a double is represented as a pair of consecutive single float
510           * registers starting at an even number.  It's possible that both Dalvik vRegs
511           * representing the incoming double were independently promoted as singles - but
512           * not in a form usable as a double.  If so, we need to flush - even though the
513           * incoming arg appears fully in register.  At this point in the code, both
514           * halves of the double are promoted.  Make sure they are in a usable form.
515           */
516          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
517          int low_reg = promotion_map_[lowreg_index].fp_reg;
518          int high_reg = promotion_map_[lowreg_index + 1].fp_reg;
519          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
520            need_flush = true;
521          }
522        }
523      }
524      if (need_flush) {
525        Store32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg);
526      }
527    } else {
528      // If arriving in frame & promoted
529      if (v_map->core_location == kLocPhysReg) {
530        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
531                   RegStorage::Solo32(v_map->core_reg));
532      }
533      if (v_map->fp_location == kLocPhysReg) {
534        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
535                   RegStorage::Solo32(v_map->fp_reg));
536      }
537    }
538  }
539}
540
541static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
542  RegLocation rl_arg = info->args[0];
543  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
544}
545
546static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
547  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
548  // get this->klass_ [use kArg1, set kArg0]
549  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
550                  cg->TargetReg(kArg0, kRef),
551                  kNotVolatile);
552  cg->MarkPossibleNullPointerException(info->opt_flags);
553}
554
555static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
556                                                       const RegStorage* alt_from,
557                                                       const CompilationUnit* cu, Mir2Lir* cg) {
558  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
559    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
560    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
561                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
562                     cg->TargetPtrReg(kInvokeTgt));
563    return true;
564  }
565  return false;
566}
567
568/*
569 * Bit of a hack here - in the absence of a real scheduling pass,
570 * emit the next instruction in static & direct invoke sequences.
571 */
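// The NextCallInsn-style routines below are small state machines: each is invoked
// repeatedly with an increasing 'state', emits the next piece of the invoke sequence,
// and returns the updated state, or -1 once the sequence is complete.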
572static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
573                          int state, const MethodReference& target_method,
574                          uint32_t unused,
575                          uintptr_t direct_code, uintptr_t direct_method,
576                          InvokeType type) {
577  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
578  if (direct_code != 0 && direct_method != 0) {
579    switch (state) {
580    case 0:  // Get the current Method* [sets kArg0]
581      if (direct_code != static_cast<uintptr_t>(-1)) {
582        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
583          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
584        }
585      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
586        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
587      }
588      if (direct_method != static_cast<uintptr_t>(-1)) {
589        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
590      } else {
591        cg->LoadMethodAddress(target_method, type, kArg0);
592      }
593      break;
594    default:
595      return -1;
596    }
597  } else {
598    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
599    switch (state) {
600    case 0:  // Get the current Method* [sets kArg0]
601      // TUNING: we can save a reg copy if Method* has been promoted.
602      cg->LoadCurrMethodDirect(arg0_ref);
603      break;
604    case 1:  // Get method->dex_cache_resolved_methods_
605      cg->LoadRefDisp(arg0_ref,
606                      mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
607                      arg0_ref,
608                      kNotVolatile);
609      // Set up direct code if known.
610      if (direct_code != 0) {
611        if (direct_code != static_cast<uintptr_t>(-1)) {
612          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
613        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
614          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
615          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
616        }
617      }
618      break;
619    case 2:  // Grab target method*
620      CHECK_EQ(cu->dex_file, target_method.dex_file);
621      cg->LoadRefDisp(arg0_ref,
622                      ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
623                      arg0_ref,
624                      kNotVolatile);
625      break;
626    case 3:  // Grab the code from the method*
627      if (direct_code == 0) {
628        if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
629          break;                                    // kInvokeTgt := arg0_ref->entrypoint
630        }
631      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
632        break;
633      }
634      // Intentional fallthrough for x86
635    default:
636      return -1;
637    }
638  }
639  return state + 1;
640}
641
642/*
643 * Bit of a hack here - in the absence of a real scheduling pass,
644 * emit the next instruction in a virtual invoke sequence.
645 * We can use kLr as a temp prior to target address loading.
646 * Note also that we'll load the first argument ("this") into
647 * kArg1 here rather than the standard LoadArgRegs.
648 */
649static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
650                         int state, const MethodReference& target_method,
651                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
652                         InvokeType unused3) {
653  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
654  /*
655   * This is the fast path in which the target virtual method is
656   * fully resolved at compile time.
657   */
658  switch (state) {
659    case 0:
660      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
661      break;
662    case 1:
663      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
664                                                  // Includes a null-check.
665      break;
666    case 2: {
667      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
668      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
669          method_idx * sizeof(mirror::Class::VTableEntry);
670      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
671      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
672      break;
673    }
674    case 3:
675      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
676        break;                                    // kInvokeTgt := kArg0->entrypoint
677      }
678      // Intentional fallthrough for X86
679    default:
680      return -1;
681  }
682  return state + 1;
683}
684
685/*
686 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
687 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
688 * more than one interface method maps to the same index. Note also that we'll load the first
689 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
690 */
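// For reference: the IMT slot consulted below is (method_idx % mirror::Class::kImtSize)
// within the class's embedded ImTable; kHiddenArg carries the interface method's dex
// method index so the conflict trampoline can resolve the call when that slot is shared.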
691static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
692                                 const MethodReference& target_method,
693                                 uint32_t method_idx, uintptr_t unused,
694                                 uintptr_t direct_method, InvokeType unused2) {
695  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
696
697  switch (state) {
698    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
699      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
700      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
701      if (cu->instruction_set == kX86) {
702        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
703      }
704      break;
705    case 1:
706      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
707      break;
708    case 2:
709      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
710                                                  // Includes a null-check.
711      break;
712    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
713      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
714          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
715      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
716      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
717      break;
718    }
719    case 4:
720      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
721        break;                                    // kInvokeTgt := kArg0->entrypoint
722      }
723      // Intentional fallthrough for X86
724    default:
725      return -1;
726  }
727  return state + 1;
728}
729
730template <size_t pointer_size>
731static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
732                            ThreadOffset<pointer_size> trampoline, int state,
733                            const MethodReference& target_method, uint32_t method_idx) {
734  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
735  /*
736   * This handles the case in which the base method is not fully
737   * resolved at compile time; we bail to a runtime helper.
738   */
739  if (state == 0) {
740    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
741      // Load trampoline target
742      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(),
743                       cg->TargetPtrReg(kInvokeTgt));
744    }
745    // Load kArg0 with method index
746    CHECK_EQ(cu->dex_file, target_method.dex_file);
747    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
748    return 1;
749  }
750  return -1;
751}
752
753static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
754                                int state,
755                                const MethodReference& target_method,
756                                uint32_t unused, uintptr_t unused2,
757                                uintptr_t unused3, InvokeType unused4) {
758  if (cu->target64) {
759    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
760    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
761  } else {
762    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
763    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
764  }
765}
766
767static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
768                                const MethodReference& target_method,
769                                uint32_t unused, uintptr_t unused2,
770                                uintptr_t unused3, InvokeType unused4) {
771  if (cu->target64) {
772    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
773    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
774  } else {
775    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
776    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
777  }
778}
779
780static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
781                               const MethodReference& target_method,
782                               uint32_t unused, uintptr_t unused2,
783                               uintptr_t unused3, InvokeType unused4) {
784  if (cu->target64) {
785    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
786    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
787  } else {
788    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
789    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
790  }
791}
792
793static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
794                           const MethodReference& target_method,
795                           uint32_t unused, uintptr_t unused2,
796                           uintptr_t unused3, InvokeType unused4) {
797  if (cu->target64) {
798    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
799        pInvokeVirtualTrampolineWithAccessCheck);
800    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
801  } else {
802    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
803        pInvokeVirtualTrampolineWithAccessCheck);
804    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
805  }
806}
807
808static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
809                                                CallInfo* info, int state,
810                                                const MethodReference& target_method,
811                                                uint32_t unused, uintptr_t unused2,
812                                                uintptr_t unused3, InvokeType unused4) {
813  if (cu->target64) {
814      ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
815          pInvokeInterfaceTrampolineWithAccessCheck);
816      return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
817    } else {
818      ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
819          pInvokeInterfaceTrampolineWithAccessCheck);
820      return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
821    }
822}
823
824int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
825                         NextCallInsn next_call_insn,
826                         const MethodReference& target_method,
827                         uint32_t vtable_idx, uintptr_t direct_code,
828                         uintptr_t direct_method, InvokeType type, bool skip_this) {
829  int last_arg_reg = 3 - 1;
830  int arg_regs[3] = {TargetReg(kArg1, kNotWide).GetReg(), TargetReg(kArg2, kNotWide).GetReg(),
831                     TargetReg(kArg3, kNotWide).GetReg()};
832
833  int next_reg = 0;
834  int next_arg = 0;
835  if (skip_this) {
836    next_reg++;
837    next_arg++;
838  }
839  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
840    RegLocation rl_arg = info->args[next_arg++];
841    rl_arg = UpdateRawLoc(rl_arg);
842    if (rl_arg.wide && (next_reg <= last_arg_reg - 1)) {
843      RegStorage r_tmp(RegStorage::k64BitPair, arg_regs[next_reg], arg_regs[next_reg + 1]);
844      LoadValueDirectWideFixed(rl_arg, r_tmp);
845      next_reg++;
846      next_arg++;
847    } else {
848      if (rl_arg.wide) {
849        rl_arg = NarrowRegLoc(rl_arg);
850        rl_arg.is_const = false;
851      }
852      LoadValueDirectFixed(rl_arg, RegStorage::Solo32(arg_regs[next_reg]));
853    }
854    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
855                                direct_code, direct_method, type);
856  }
857  return call_state;
858}
859
860/*
861 * Load up to 5 arguments, the first three of which will be in
862 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
863 * and as part of the load sequence, it must be replaced with
864 * the target method pointer.  Note, this may also be called
865 * for "range" variants if the number of arguments is 5 or fewer.
866 */
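// Illustrative layout for a five-word argument list: words 0-2 are loaded into
// kArg1..kArg3 by LoadArgRegs(), while words 3 and 4 are stored to the outs area at
// SP + (word_index + 1) * 4, with SP + 0 reserved for the StackReference<ArtMethod>.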
867int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
868                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
869                                  const MethodReference& target_method,
870                                  uint32_t vtable_idx, uintptr_t direct_code,
871                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
872  RegLocation rl_arg;
873
874  /* If no arguments, just return */
875  if (info->num_arg_words == 0)
876    return call_state;
877
878  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
879                              direct_code, direct_method, type);
880
881  DCHECK_LE(info->num_arg_words, 5);
882  if (info->num_arg_words > 3) {
883    int32_t next_use = 3;
884    // Detect special case of wide arg spanning arg3/arg4
885    RegLocation rl_use0 = info->args[0];
886    RegLocation rl_use1 = info->args[1];
887    RegLocation rl_use2 = info->args[2];
888    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
889      RegStorage reg;
890      // Wide spans, we need the 2nd half of uses[2].
891      rl_arg = UpdateLocWide(rl_use2);
892      if (rl_arg.location == kLocPhysReg) {
893        if (rl_arg.reg.IsPair()) {
894          reg = rl_arg.reg.GetHigh();
895        } else {
896          RegisterInfo* info = GetRegInfo(rl_arg.reg);
897          info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
898          if (info == nullptr) {
899            // NOTE: For hard float convention we won't split arguments across reg/mem.
900            UNIMPLEMENTED(FATAL) << "Needs hard float api.";
901          }
902          reg = info->GetReg();
903        }
904      } else {
905        // kArg2 & kArg3 can safely be used here
906        reg = TargetReg(kArg3, kNotWide);
907        {
908          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
909          Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
910        }
911        call_state = next_call_insn(cu_, info, call_state, target_method,
912                                    vtable_idx, direct_code, direct_method, type);
913      }
914      {
915        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
916        Store32Disp(TargetPtrReg(kSp), (next_use + 1) * 4, reg);
917      }
918      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
919                                  direct_code, direct_method, type);
920      next_use++;
921    }
922    // Loop through the rest
923    while (next_use < info->num_arg_words) {
924      RegStorage arg_reg;
925      rl_arg = info->args[next_use];
926      rl_arg = UpdateRawLoc(rl_arg);
927      if (rl_arg.location == kLocPhysReg) {
928        arg_reg = rl_arg.reg;
929      } else {
930        arg_reg = TargetReg(kArg2, rl_arg.wide ? kWide : kNotWide);
931        if (rl_arg.wide) {
932          LoadValueDirectWideFixed(rl_arg, arg_reg);
933        } else {
934          LoadValueDirectFixed(rl_arg, arg_reg);
935        }
936        call_state = next_call_insn(cu_, info, call_state, target_method,
937                                    vtable_idx, direct_code, direct_method, type);
938      }
939      int outs_offset = (next_use + 1) * 4;
940      {
941        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
942        if (rl_arg.wide) {
943          StoreBaseDisp(TargetPtrReg(kSp), outs_offset, arg_reg, k64, kNotVolatile);
944          next_use += 2;
945        } else {
946          Store32Disp(TargetPtrReg(kSp), outs_offset, arg_reg);
947          next_use++;
948        }
949      }
950      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
951                               direct_code, direct_method, type);
952    }
953  }
954
955  call_state = LoadArgRegs(info, call_state, next_call_insn,
956                           target_method, vtable_idx, direct_code, direct_method,
957                           type, skip_this);
958
959  if (pcrLabel) {
960    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
961      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
962    } else {
963      *pcrLabel = nullptr;
964      if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
965          (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
966        return call_state;
967      }
968      // In lieu of generating a check for kArg1 being null, we need to
969      // perform a load when doing implicit checks.
970      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
971    }
972  }
973  return call_state;
974}
975
976// Default implementation of implicit null pointer check.
977// Overridden by arch specific as necessary.
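// The "check" here is just a 32-bit load from offset zero of the candidate reference:
// if the reference is null, the resulting fault is expected to be caught by the
// runtime's fault handler and turned into a NullPointerException at the marked PC.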
978void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
979  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
980    return;
981  }
982  RegStorage tmp = AllocTemp();
983  Load32Disp(reg, 0, tmp);
984  MarkPossibleNullPointerException(opt_flags);
985  FreeTemp(tmp);
986}
987
988
989/*
990 * May have 0+ arguments (also used for jumbo).  Note that
991 * source virtual registers may be in physical registers, so may
992 * need to be flushed to home location before copying.  This
993 * applies to arg3 and above (see below).
994 *
995 * Two general strategies:
996 *    If < 20 arguments
997 *       Pass args 3-18 using vldm/vstm block copy
998 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
999 *    If 20+ arguments
1000 *       Pass args arg19+ using memcpy block copy
1001 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
1002 *
1003 */
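// In this implementation the block copy is specialized per ISA: Thumb2 uses a
// vldm/vstm pair when at most 16 stack words remain, x86/x86-64 copies through an
// xmm (or GPR) temp with alignment-aware moves, and all other cases fall back to the
// pMemcpy runtime helper.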
1004int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
1005                                LIR** pcrLabel, NextCallInsn next_call_insn,
1006                                const MethodReference& target_method,
1007                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
1008                                InvokeType type, bool skip_this) {
1009  // If we can treat it as non-range (Jumbo ops will use range form)
1010  if (info->num_arg_words <= 5)
1011    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
1012                                next_call_insn, target_method, vtable_idx,
1013                                direct_code, direct_method, type, skip_this);
1014  /*
1015   * First load the non-register arguments.  Both forms expect all
1016   * of the source arguments to be in their home frame location, so
1017   * scan the s_reg names and flush any that have been promoted to
1018   * frame backing storage.
1019   */
1020  // Scan the rest of the args - if in phys_reg flush to memory
1021  for (int next_arg = 0; next_arg < info->num_arg_words;) {
1022    RegLocation loc = info->args[next_arg];
1023    if (loc.wide) {
1024      loc = UpdateLocWide(loc);
1025      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
1026        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1027        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
1028      }
1029      next_arg += 2;
1030    } else {
1031      loc = UpdateLoc(loc);
1032      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
1033        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1034        Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
1035      }
1036      next_arg++;
1037    }
1038  }
1039
1040  // Logic below assumes that Method pointer is at offset zero from SP.
1041  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
1042
1043  // The first 3 arguments are passed via registers.
1044  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
1045  // get size of uintptr_t or size of object reference according to model being used.
1046  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
1047  int start_offset = SRegOffset(info->args[3].s_reg_low);
1048  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
1049  DCHECK_GT(regs_left_to_pass_via_stack, 0);
1050
1051  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
1052    // Use vldm/vstm pair using kArg3 as a temp
1053    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1054                             direct_code, direct_method, type);
1055    OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), start_offset);
1056    LIR* ld = nullptr;
1057    {
1058      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1059      ld = OpVldm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
1060    }
1061    // TUNING: loosen barrier
1062    ld->u.m.def_mask = &kEncodeAll;
1063    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1064                             direct_code, direct_method, type);
1065    OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4));
1066    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1067                             direct_code, direct_method, type);
1068    LIR* st = nullptr;
1069    {
1070      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1071      st = OpVstm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
1072    }
1073    st->u.m.def_mask = &kEncodeAll;
1074    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1075                             direct_code, direct_method, type);
1076  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1077    int current_src_offset = start_offset;
1078    int current_dest_offset = outs_offset;
1079
1080    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
1081    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1082    while (regs_left_to_pass_via_stack > 0) {
1083      // This is based on the knowledge that the stack itself is 16-byte aligned.
1084      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
1085      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
1086      size_t bytes_to_move;
1087
1088      /*
1089       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
1090       * 128-bit move because we won't get the chance to try to align. If there are more than
1091       * 4 registers left to move, consider doing a 128-bit move only if either src or dest is aligned.
1092       * We do this because we could potentially do a smaller move to align.
1093       */
1094      if (regs_left_to_pass_via_stack == 4 ||
1095          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
1096        // Moving 128-bits via xmm register.
1097        bytes_to_move = sizeof(uint32_t) * 4;
1098
1099        // Allocate a free xmm temp. Since we are working through the calling sequence,
1100        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
1101        // there are no free registers.
1102        RegStorage temp = AllocTempDouble();
1103
1104        LIR* ld1 = nullptr;
1105        LIR* ld2 = nullptr;
1106        LIR* st1 = nullptr;
1107        LIR* st2 = nullptr;
1108
1109        /*
1110         * The logic is similar for both loads and stores. If we have 16-byte alignment,
1111         * do an aligned move. If we have 8-byte alignment, then do the move in two
1112         * parts. This approach prevents possible cache line splits. Finally, fall back
1113         * to doing an unaligned move. In most cases we likely won't split the cache
1114         * line but we cannot prove it and thus take a conservative approach.
1115         */
1116        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
1117        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
1118
1119        if (src_is_16b_aligned) {
1120          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP);
1121        } else if (src_is_8b_aligned) {
1122          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP);
1123          ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1),
1124                            kMovHi128FP);
1125        } else {
1126          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP);
1127        }
1128
1129        if (dest_is_16b_aligned) {
1130          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP);
1131        } else if (dest_is_8b_aligned) {
1132          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP);
1133          st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1),
1134                            temp, kMovHi128FP);
1135        } else {
1136          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP);
1137        }
1138
1139        // TODO If we could keep track of aliasing information for memory accesses that are wider
1140        // than 64-bit, we wouldn't need to set up a barrier.
1141        if (ld1 != nullptr) {
1142          if (ld2 != nullptr) {
1143            // For 64-bit load we can actually set up the aliasing information.
1144            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
1145            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
1146                                    true);
1147          } else {
1148            // Set barrier for 128-bit load.
1149            ld1->u.m.def_mask = &kEncodeAll;
1150          }
1151        }
1152        if (st1 != nullptr) {
1153          if (st2 != nullptr) {
1154            // For 64-bit store we can actually set up the aliasing information.
1155            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
1156            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
1157                                    true);
1158          } else {
1159            // Set barrier for 128-bit store.
1160            st1->u.m.def_mask = &kEncodeAll;
1161          }
1162        }
1163
1164        // Free the temporary used for the data movement.
1165        FreeTemp(temp);
1166      } else {
1167        // Moving 32-bits via general purpose register.
1168        bytes_to_move = sizeof(uint32_t);
1169
1170        // Instead of allocating a new temp, simply reuse one of the registers being used
1171        // for argument passing.
1172        RegStorage temp = TargetReg(kArg3, kNotWide);
1173
1174        // Now load the argument VR and store to the outs.
1175        Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
1176        Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
1177      }
1178
1179      current_src_offset += bytes_to_move;
1180      current_dest_offset += bytes_to_move;
1181      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
1182    }
1183  } else {
1184    // Generate memcpy
1185    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
1186    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
1187    if (cu_->target64) {
1188      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef),
1189                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
1190    } else {
1191      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef),
1192                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
1193    }
1194  }
1195
1196  call_state = LoadArgRegs(info, call_state, next_call_insn,
1197                           target_method, vtable_idx, direct_code, direct_method,
1198                           type, skip_this);
1199
1200  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1201                           direct_code, direct_method, type);
1202  if (pcrLabel) {
1203    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
1204      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
1205    } else {
1206      *pcrLabel = nullptr;
1207      if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
1208          (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
1209        return call_state;
1210      }
1211      // In lieu of generating a check for kArg1 being null, we need to
1212      // perform a load when doing implicit checks.
1213      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
1214    }
1215  }
1216  return call_state;
1217}
1218
1219RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
1220  RegLocation res;
1221  if (info->result.location == kLocInvalid) {
1222    res = GetReturn(LocToRegClass(info->result));
1223  } else {
1224    res = info->result;
1225  }
1226  return res;
1227}
1228
1229RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
1230  RegLocation res;
1231  if (info->result.location == kLocInvalid) {
1232    res = GetReturnWide(kCoreReg);
1233  } else {
1234    res = info->result;
1235  }
1236  return res;
1237}
1238
1239bool Mir2Lir::GenInlinedGet(CallInfo* info) {
1240  if (cu_->instruction_set == kMips) {
1241    // TODO - add Mips implementation
1242    return false;
1243  }
1244
1245  // the reference class is stored in the image dex file, which might not be the same as the cu's
1246  // dex file. Query the reference class using the image dex file, then reset to the starting dex
1247  // file after loading the class type.
1248  uint16_t type_idx = 0;
1249  const DexFile* ref_dex_file = nullptr;
1250  {
1251    ScopedObjectAccess soa(Thread::Current());
1252    type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
1253    ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
1254  }
1255  CHECK(LIKELY(ref_dex_file != nullptr));
1256
1257  // address is either static within the image file, or needs to be patched up after compilation.
1258  bool unused_type_initialized;
1259  bool use_direct_type_ptr;
1260  uintptr_t direct_type_ptr;
1261  bool is_finalizable;
1262  const DexFile* old_dex = cu_->dex_file;
1263  cu_->dex_file = ref_dex_file;
1264  RegStorage reg_class = TargetPtrReg(kArg1);
1265  if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
1266                                                &use_direct_type_ptr, &direct_type_ptr,
1267                                                &is_finalizable) || is_finalizable) {
1268    cu_->dex_file = old_dex;
1269    // The address is not known and a post-compile patch is not possible; cannot use the intrinsic.
1270    return false;
1271  }
1272  if (use_direct_type_ptr) {
1273    LoadConstant(reg_class, direct_type_ptr);
1274  } else {
1275    LoadClassType(type_idx, kArg1);
1276  }
1277  cu_->dex_file = old_dex;
1278
1279  // Get the offsets of the slow-path and disable-intrinsic flags in the Reference class.
1280  uint32_t slow_path_flag_offset = 0;
1281  uint32_t disable_flag_offset = 0;
1282  {
1283    ScopedObjectAccess soa(Thread::Current());
1284    mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
1285    slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
1286    disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
1287  }
1288  CHECK(slow_path_flag_offset && disable_flag_offset &&
1289        (slow_path_flag_offset != disable_flag_offset));
1290
1291  // Intrinsic logic starts here.
1292  RegLocation rl_obj = info->args[0];
1293  rl_obj = LoadValue(rl_obj);
1294
1295  RegStorage reg_slow_path = AllocTemp();
1296  RegStorage reg_disabled = AllocTemp();
1297  Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
1298  Load32Disp(reg_class, disable_flag_offset, reg_disabled);
1299  OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
1300  FreeTemp(reg_disabled);
1301
1302  // If the slow path is required, jump to the JNI path target.
1303  LIR* slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
1304  FreeTemp(reg_slow_path);
1305
1306  // Slow path not enabled; simply load the referent of the reference object.
1307  RegLocation rl_dest = InlineTarget(info);
1308  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1309  GenNullCheck(rl_obj.reg, info->opt_flags);
1310  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
1311      kNotVolatile);
1312  MarkPossibleNullPointerException(info->opt_flags);
1313  StoreValue(rl_dest, rl_result);
1314
1315  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
1316  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
1317
1318  return true;
1319}
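// A rough sketch (illustration only, not compiler code) of the fast path emitted by
// GenInlinedGet() for Reference.get(), using hypothetical field names:
//
//   if ((ref_class->slowPathEnabled | ref_class->disableIntrinsic) != 0) {
//     goto slow_path;            // Falls back to the ordinary invoke via the slow path.
//   }
//   result = ref->referent;      // Plain (non-volatile) reference load on the fast path.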
1320
1321bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
1322  if (cu_->instruction_set == kMips) {
1323    // TODO - add Mips implementation
1324    return false;
1325  }
1326  // Location of reference to data array
1327  int value_offset = mirror::String::ValueOffset().Int32Value();
1328  // Location of count
1329  int count_offset = mirror::String::CountOffset().Int32Value();
1330  // Starting offset within data array
1331  int offset_offset = mirror::String::OffsetOffset().Int32Value();
1332  // Start of char data within array_.
1333  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
1334
1335  RegLocation rl_obj = info->args[0];
1336  RegLocation rl_idx = info->args[1];
1337  rl_obj = LoadValue(rl_obj, kRefReg);
1338  // X86 wants to avoid putting a constant index into a register.
1339  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
1340    rl_idx = LoadValue(rl_idx, kCoreReg);
1341  }
1342  RegStorage reg_max;
1343  GenNullCheck(rl_obj.reg, info->opt_flags);
1344  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1345  LIR* range_check_branch = nullptr;
1346  RegStorage reg_off;
1347  RegStorage reg_ptr;
1348  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1349    reg_off = AllocTemp();
1350    reg_ptr = AllocTempRef();
1351    if (range_check) {
1352      reg_max = AllocTemp();
1353      Load32Disp(rl_obj.reg, count_offset, reg_max);
1354      MarkPossibleNullPointerException(info->opt_flags);
1355    }
1356    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1357    MarkPossibleNullPointerException(info->opt_flags);
1358    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1359    if (range_check) {
1360      // Set up a slow path to allow retry in case of bounds violation.
1361      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1362      FreeTemp(reg_max);
1363      range_check_branch = OpCondBranch(kCondUge, nullptr);
1364    }
1365    OpRegImm(kOpAdd, reg_ptr, data_offset);
1366  } else {
1367    if (range_check) {
1368      // On x86, we can compare to memory directly.
1369      // Set up a launch pad to allow retry in case of bounds violation.
1370      if (rl_idx.is_const) {
1371        LIR* comparison;
1372        range_check_branch = OpCmpMemImmBranch(
1373            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
1374            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
1375        MarkPossibleNullPointerExceptionAfter(0, comparison);
1376      } else {
1377        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
1378        MarkPossibleNullPointerException(0);
1379        range_check_branch = OpCondBranch(kCondUge, nullptr);
1380      }
1381    }
1382    reg_off = AllocTemp();
1383    reg_ptr = AllocTempRef();
1384    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1385    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1386  }
1387  if (rl_idx.is_const) {
1388    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1389  } else {
1390    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1391  }
1392  FreeTemp(rl_obj.reg);
1393  if (rl_idx.location == kLocPhysReg) {
1394    FreeTemp(rl_idx.reg);
1395  }
1396  RegLocation rl_dest = InlineTarget(info);
1397  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1398  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1399    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1400  } else {
1401    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
1402  }
1403  FreeTemp(reg_off);
1404  FreeTemp(reg_ptr);
1405  StoreValue(rl_dest, rl_result);
1406  if (range_check) {
1407    DCHECK(range_check_branch != nullptr);
1408    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1409    AddIntrinsicSlowPath(info, range_check_branch);
1410  }
1411  return true;
1412}
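// The address arithmetic emitted above, sketched in C-like pseudocode (illustration
// only; the names mirror the offsets loaded in the function):
//
//   uint16_t* data = (uint16_t*)(value_array + data_offset);   // Start of the char data.
//   result = data[string_offset + index];                      // After the bounds check.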
1413
1414// Generates an inlined String.isEmpty() or String.length().
1415bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1416  if (cu_->instruction_set == kMips) {
1417    // TODO - add Mips implementation
1418    return false;
1419  }
1420  // dst = src.length();
1421  RegLocation rl_obj = info->args[0];
1422  rl_obj = LoadValue(rl_obj, kRefReg);
1423  RegLocation rl_dest = InlineTarget(info);
1424  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1425  GenNullCheck(rl_obj.reg, info->opt_flags);
1426  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1427  MarkPossibleNullPointerException(info->opt_flags);
1428  if (is_empty) {
1429    // dst = (dst == 0);
1430    if (cu_->instruction_set == kThumb2) {
1431      RegStorage t_reg = AllocTemp();
1432      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1433      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1434    } else if (cu_->instruction_set == kArm64) {
1435      OpRegImm(kOpSub, rl_result.reg, 1);
1436      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1437    } else {
1438      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1439      OpRegImm(kOpSub, rl_result.reg, 1);
1440      OpRegImm(kOpLsr, rl_result.reg, 31);
1441    }
1442  }
1443  StoreValue(rl_dest, rl_result);
1444  return true;
1445}
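// The is_empty variants above compute (count == 0) without branching.  A standalone
// sketch (illustration only), assuming the count fits in 31 bits, as it does here:
//
//   #include <cstdint>
//   uint32_t IsZero(uint32_t count) {
//     return (count - 1) >> 31;   // 1 iff count == 0.
//   }
//
// The Thumb2 path reaches the same result with NEG + ADC, where the carry is set only
// when count is zero.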
1446
1447bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1448  if (cu_->instruction_set == kMips) {
1449    // TODO - add Mips implementation.
1450    return false;
1451  }
1452  RegLocation rl_src_i = info->args[0];
1453  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1454  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1455  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1456  if (size == k64) {
1457    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1458      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1459      StoreValueWide(rl_dest, rl_result);
1460      return true;
1461    }
1462    RegStorage r_i_low = rl_i.reg.GetLow();
1463    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1464      // The first REV would clobber rl_i's low half; save it in a temp for the second REV.
1465      r_i_low = AllocTemp();
1466      OpRegCopy(r_i_low, rl_i.reg);
1467    }
1468    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1469    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1470    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1471      FreeTemp(r_i_low);
1472    }
1473    StoreValueWide(rl_dest, rl_result);
1474  } else {
1475    DCHECK(size == k32 || size == kSignedHalf);
1476    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1477    OpRegReg(op, rl_result.reg, rl_i.reg);
1478    StoreValue(rl_dest, rl_result);
1479  }
1480  return true;
1481}
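// On 32-bit targets without a single 64-bit REV, the code above builds the 64-bit
// byte swap from two 32-bit swaps with the halves exchanged.  Standalone sketch
// (illustration only):
//
//   #include <cstdint>
//   uint64_t ReverseBytes64(uint64_t x) {
//     uint32_t lo = static_cast<uint32_t>(x);
//     uint32_t hi = static_cast<uint32_t>(x >> 32);
//     return (static_cast<uint64_t>(__builtin_bswap32(lo)) << 32) | __builtin_bswap32(hi);
//   }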
1482
1483bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1484  if (cu_->instruction_set == kMips) {
1485    // TODO - add Mips implementation
1486    return false;
1487  }
1488  RegLocation rl_src = info->args[0];
1489  rl_src = LoadValue(rl_src, kCoreReg);
1490  RegLocation rl_dest = InlineTarget(info);
1491  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1492  RegStorage sign_reg = AllocTemp();
1493  // abs(x): y = x >> 31 (arithmetic); abs = (x + y) ^ y.
1494  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1495  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1496  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1497  StoreValue(rl_dest, rl_result);
1498  return true;
1499}
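// Standalone sketch of the branchless abs() emitted above (illustration only,
// assuming 32-bit two's-complement arithmetic):
//
//   #include <cstdint>
//   int32_t AbsViaSignMask(int32_t x) {
//     int32_t sign = x >> 31;     // 0 when x >= 0, -1 (all ones) when x < 0.
//     return (x + sign) ^ sign;   // e.g. x = -5: (-5 + -1) ^ -1 = 5.
//   }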
1500
1501bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1502  if (cu_->instruction_set == kMips) {
1503    // TODO - add Mips implementation
1504    return false;
1505  }
1506  RegLocation rl_src = info->args[0];
1507  rl_src = LoadValueWide(rl_src, kCoreReg);
1508  RegLocation rl_dest = InlineTargetWide(info);
1509  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1510
1511  // If on x86 or if we would clobber a register needed later, just copy the source first.
1512  if (cu_->instruction_set != kX86_64 &&
1513      (cu_->instruction_set == kX86 ||
1514       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1515    OpRegCopyWide(rl_result.reg, rl_src.reg);
1516    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1517        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1518        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1519        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1520      // Reuse source registers to avoid running out of temps.
1521      FreeTemp(rl_src.reg);
1522    }
1523    rl_src = rl_result;
1524  }
1525
1526  // abs(x): y = x >> 31 (arithmetic); abs = (x + y) ^ y.
1527  RegStorage sign_reg;
1528  if (cu_->instruction_set == kX86_64) {
1529    sign_reg = AllocTempWide();
1530    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1531    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1532    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1533  } else {
1534    sign_reg = AllocTemp();
1535    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1536    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1537    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1538    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1539    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1540  }
1541  FreeTemp(sign_reg);
1542  StoreValueWide(rl_dest, rl_result);
1543  return true;
1544}
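// The register-pair path above applies the same sign-mask identity, carrying from the
// low word into the high word.  Sketch (illustration only):
//
//   sign = high >> 31            // Replicated sign, applied to both halves.
//   lo'  = lo + sign             // ADD, produces a carry.
//   hi'  = hi + sign + carry     // ADC.
//   result = { hi' ^ sign, lo' ^ sign }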
1545
1546bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
1547  if (cu_->instruction_set == kMips) {
1548    // TODO - add Mips implementation
1549    return false;
1550  }
1551  RegLocation rl_src = info->args[0];
1552  rl_src = LoadValue(rl_src, kCoreReg);
1553  RegLocation rl_dest = InlineTarget(info);
1554  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1555  OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
1556  StoreValue(rl_dest, rl_result);
1557  return true;
1558}
1559
1560bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1561  // Currently implemented only for ARM64
1562  return false;
1563}
1564
1565bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1566  // Currently implemented only for ARM64
1567  return false;
1568}
1569
1570bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
1571  if (cu_->instruction_set == kMips) {
1572    // TODO - add Mips implementation
1573    return false;
1574  }
1575  RegLocation rl_src = info->args[0];
1576  rl_src = LoadValueWide(rl_src, kCoreReg);
1577  RegLocation rl_dest = InlineTargetWide(info);
1578  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1579
1580  OpRegCopyWide(rl_result.reg, rl_src.reg);
1581  OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
1582  StoreValueWide(rl_dest, rl_result);
1583  return true;
1584}
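// Both GenInlinedAbsFloat() and GenInlinedAbsDouble() simply clear the IEEE-754 sign
// bit of the raw bit pattern (illustration only):
//
//   float:   bits & 0x7fffffff
//   double:  high word & 0x7fffffff, low word unchanged (only the high word holds the sign).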
1585
1586bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1587  if (cu_->instruction_set == kMips) {
1588    // TODO - add Mips implementation
1589    return false;
1590  }
1591  RegLocation rl_src = info->args[0];
1592  RegLocation rl_dest = InlineTarget(info);
1593  StoreValue(rl_dest, rl_src);
1594  return true;
1595}
1596
1597bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1598  if (cu_->instruction_set == kMips) {
1599    // TODO - add Mips implementation
1600    return false;
1601  }
1602  RegLocation rl_src = info->args[0];
1603  RegLocation rl_dest = InlineTargetWide(info);
1604  StoreValueWide(rl_dest, rl_src);
1605  return true;
1606}
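// The "conversions" above back intrinsics such as floatToRawIntBits/intBitsToFloat and
// their double/long counterparts; they are pure bit reinterpretations, so they reduce
// to a plain value copy.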
1607
1608bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1609  return false;
1610}
1611
1612
1613/*
1614 * Fast String.indexOf(I) and String.indexOf(II).  Handles the simple case of char <= 0xFFFF;
1615 * otherwise bails to the standard library code.
1616 */
1617bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1618  if (cu_->instruction_set == kMips) {
1619    // TODO - add Mips implementation
1620    return false;
1621  }
1622  if (cu_->instruction_set == kX86_64) {
1623    // TODO - add kX86_64 implementation
1624    return false;
1625  }
1626  RegLocation rl_obj = info->args[0];
1627  RegLocation rl_char = info->args[1];
1628  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1629    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1630    return false;
1631  }
1632
1633  ClobberCallerSave();
1634  LockCallTemps();  // Using fixed registers
1635  RegStorage reg_ptr = TargetReg(kArg0, kRef);
1636  RegStorage reg_char = TargetReg(kArg1, kNotWide);
1637  RegStorage reg_start = TargetReg(kArg2, kNotWide);
1638
1639  LoadValueDirectFixed(rl_obj, reg_ptr);
1640  LoadValueDirectFixed(rl_char, reg_char);
1641  if (zero_based) {
1642    LoadConstant(reg_start, 0);
1643  } else {
1644    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1645    LoadValueDirectFixed(rl_start, reg_start);
1646  }
1647  RegStorage r_tgt = cu_->target64 ?
1648      LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
1649      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
1650  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1651  LIR* high_code_point_branch =
1652      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1653  // NOTE: not a safepoint
1654  OpReg(kOpBlx, r_tgt);
1655  if (!rl_char.is_const) {
1656    // Add the slow path for code points beyond 0xFFFF.
1657    DCHECK(high_code_point_branch != nullptr);
1658    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1659    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1660    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1661  } else {
1662    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1663    DCHECK(high_code_point_branch == nullptr);
1664  }
1665  RegLocation rl_return = GetReturn(kCoreReg);
1666  RegLocation rl_dest = InlineTarget(info);
1667  StoreValue(rl_dest, rl_return);
1668  return true;
1669}
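// Decision summary for the inlined indexOf() above (descriptive note):
//   - constant char >  0xFFFF: not inlined at all (rejected before any code is emitted);
//   - non-constant char:       call pIndexOf, but branch to the intrinsic slow path first
//                              if the runtime value exceeds 0xFFFF;
//   - otherwise:               unconditional call to the pIndexOf helper.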
1670
1671/* Fast String.compareTo(Ljava/lang/String;)I. */
1672bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1673  if (cu_->instruction_set == kMips) {
1674    // TODO - add Mips implementation
1675    return false;
1676  }
1677  ClobberCallerSave();
1678  LockCallTemps();  // Using fixed registers
1679  RegStorage reg_this = TargetReg(kArg0, kRef);
1680  RegStorage reg_cmp = TargetReg(kArg1, kRef);
1681
1682  RegLocation rl_this = info->args[0];
1683  RegLocation rl_cmp = info->args[1];
1684  LoadValueDirectFixed(rl_this, reg_this);
1685  LoadValueDirectFixed(rl_cmp, reg_cmp);
1686  RegStorage r_tgt;
1687  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1688    if (cu_->target64) {
1689      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1690    } else {
1691      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1692    }
1693  } else {
1694    r_tgt = RegStorage::InvalidReg();
1695  }
1696  GenExplicitNullCheck(reg_this, info->opt_flags);
1697  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1698  // TUNING: check if rl_cmp.s_reg_low is already null checked
1699  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1700  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1701  // NOTE: not a safepoint
1702  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1703    OpReg(kOpBlx, r_tgt);
1704  } else {
1705    if (cu_->target64) {
1706      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1707    } else {
1708      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1709    }
1710  }
1711  RegLocation rl_return = GetReturn(kCoreReg);
1712  RegLocation rl_dest = InlineTarget(info);
1713  StoreValue(rl_dest, rl_return);
1714  return true;
1715}
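// Note: a null argument string takes the intrinsic slow path above, so the ordinary
// invoke (and its NullPointerException) handles that case; only non-null arguments
// reach the pStringCompareTo helper call.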
1716
1717bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1718  RegLocation rl_dest = InlineTarget(info);
1719
1720  // Early exit if the result is unused.
1721  if (rl_dest.orig_sreg < 0) {
1722    return true;
1723  }
1724
1725  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1726
1727  switch (cu_->instruction_set) {
1728    case kArm:
1729      // Fall-through.
1730    case kThumb2:
1731      // Fall-through.
1732    case kMips:
1733      Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1734      break;
1735
1736    case kArm64:
1737      LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1738                  kNotVolatile);
1739      break;
1740
1741    case kX86:
1742      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1743                                                          Thread::PeerOffset<4>());
1744      break;
1745
1746    case kX86_64:
1747      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1748                                                          Thread::PeerOffset<8>());
1749      break;
1750
1751    default:
1752      LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
1753  }
1754  StoreValue(rl_dest, rl_result);
1755  return true;
1756}
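// Thread.currentThread() reduces to a single load of the Java peer object out of the
// native Thread, reached through the self pointer (a dedicated register on ARM/MIPS,
// thread-local memory on x86).  Roughly (field name illustrative only):
//
//   result = self->peer;   // At Thread::PeerOffset<pointer_size>().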
1757
1758bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1759                                  bool is_long, bool is_volatile) {
1760  if (cu_->instruction_set == kMips) {
1761    // TODO - add Mips implementation
1762    return false;
1763  }
1764  // Unused - RegLocation rl_src_unsafe = info->args[0];
1765  RegLocation rl_src_obj = info->args[1];  // Object
1766  RegLocation rl_src_offset = info->args[2];  // long low
1767  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1768  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1769
1770  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1771  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1772  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
1773  if (is_long) {
1774    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1775        || cu_->instruction_set == kArm64) {
1776      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1777    } else {
1778      RegStorage rl_temp_offset = AllocTemp();
1779      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1780      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1781      FreeTemp(rl_temp_offset);
1782    }
1783  } else {
1784    if (rl_result.ref) {
1785      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1786    } else {
1787      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1788    }
1789  }
1790
1791  if (is_volatile) {
1792    GenMemBarrier(kLoadAny);
1793  }
1794
1795  if (is_long) {
1796    StoreValueWide(rl_dest, rl_result);
1797  } else {
1798    StoreValue(rl_dest, rl_result);
1799  }
1800  return true;
1801}
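// Barrier placement for a volatile Unsafe get, as emitted above (illustration only):
//
//   value = *(obj + offset);
//   LoadAny barrier;        // Keeps the load ordered before subsequent accesses.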
1802
1803bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1804                                  bool is_object, bool is_volatile, bool is_ordered) {
1805  if (cu_->instruction_set == kMips) {
1806    // TODO - add Mips implementation
1807    return false;
1808  }
1809  // Unused - RegLocation rl_src_unsafe = info->args[0];
1810  RegLocation rl_src_obj = info->args[1];  // Object
1811  RegLocation rl_src_offset = info->args[2];  // long low
1812  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1813  RegLocation rl_src_value = info->args[4];  // value to store
1814  if (is_volatile || is_ordered) {
1815    GenMemBarrier(kAnyStore);
1816  }
1817  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1818  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1819  RegLocation rl_value;
1820  if (is_long) {
1821    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1822    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1823        || cu_->instruction_set == kArm64) {
1824      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1825    } else {
1826      RegStorage rl_temp_offset = AllocTemp();
1827      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1828      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1829      FreeTemp(rl_temp_offset);
1830    }
1831  } else {
1832    rl_value = LoadValue(rl_src_value);
1833    if (rl_value.ref) {
1834      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1835    } else {
1836      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1837    }
1838  }
1839
1840  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1841  FreeTemp(rl_offset.reg);
1842
1843  if (is_volatile) {
1844    // Prevent reordering with a subsequent volatile load.
1845    // May also be needed to address store atomicity issues.
1846    GenMemBarrier(kAnyAny);
1847  }
1848  if (is_object) {
1849    MarkGCCard(rl_value.reg, rl_object.reg);
1850  }
1851  return true;
1852}
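// Barrier placement for a volatile (or ordered) Unsafe put, as emitted above
// (illustration only):
//
//   AnyStore barrier;       // Volatile and ordered puts.
//   *(obj + offset) = value;
//   AnyAny barrier;         // Volatile puts only.
//   if (storing a reference) MarkGCCard(value, obj);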
1853
1854void Mir2Lir::GenInvoke(CallInfo* info) {
1855  if ((info->opt_flags & MIR_INLINED) != 0) {
1856    // Already inlined but we may still need the null check.
1857    if (info->type != kStatic &&
1858        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
1859         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0))  {
1860      RegLocation rl_obj = LoadValue(info->args[0], kRefReg);
1861      GenNullCheck(rl_obj.reg);
1862    }
1863    return;
1864  }
1865  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1866  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1867      ->GenIntrinsic(this, info)) {
1868    return;
1869  }
1870  GenInvokeNoInline(info);
1871}
1872
1873template <size_t pointer_size>
1874static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
1875  ThreadOffset<pointer_size> trampoline(-1);
1876  switch (type) {
1877    case kInterface:
1878      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeInterfaceTrampolineWithAccessCheck);
1879      break;
1880    case kDirect:
1881      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeDirectTrampolineWithAccessCheck);
1882      break;
1883    case kStatic:
1884      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeStaticTrampolineWithAccessCheck);
1885      break;
1886    case kSuper:
1887      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeSuperTrampolineWithAccessCheck);
1888      break;
1889    case kVirtual:
1890      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeVirtualTrampolineWithAccessCheck);
1891      break;
1892    default:
1893      LOG(FATAL) << "Unexpected invoke type";
1894  }
1895  return mir_to_lir->OpThreadMem(kOpBlx, trampoline);
1896}
1897
1898void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1899  int call_state = 0;
1900  LIR* null_ck;
1901  LIR** p_null_ck = nullptr;
1902  NextCallInsn next_call_insn;
1903  FlushAllRegs();  /* Everything to home location */
1904  // Explicit register usage
1905  LockCallTemps();
1906
1907  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1908  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1909  BeginInvoke(info);
1910  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1911  info->type = static_cast<InvokeType>(method_info.GetSharpType());
1912  bool fast_path = method_info.FastPath();
1913  bool skip_this;
1914  if (info->type == kInterface) {
1915    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1916    skip_this = fast_path;
1917  } else if (info->type == kDirect) {
1918    if (fast_path) {
1919      p_null_ck = &null_ck;
1920    }
1921    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1922    skip_this = false;
1923  } else if (info->type == kStatic) {
1924    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1925    skip_this = false;
1926  } else if (info->type == kSuper) {
1927    DCHECK(!fast_path);  // Fast path is a direct call.
1928    next_call_insn = NextSuperCallInsnSP;
1929    skip_this = false;
1930  } else {
1931    DCHECK_EQ(info->type, kVirtual);
1932    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1933    skip_this = fast_path;
1934  }
1935  MethodReference target_method = method_info.GetTargetMethod();
1936  if (!info->is_range) {
1937    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1938                                      next_call_insn, target_method, method_info.VTableIndex(),
1939                                      method_info.DirectCode(), method_info.DirectMethod(),
1940                                      original_type, skip_this);
1941  } else {
1942    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1943                                    next_call_insn, target_method, method_info.VTableIndex(),
1944                                    method_info.DirectCode(), method_info.DirectMethod(),
1945                                    original_type, skip_this);
1946  }
1947  // Finish up any part of the call sequence not interleaved with argument loading.
1948  while (call_state >= 0) {
1949    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1950                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
1951  }
1952  LIR* call_inst;
1953  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1954    call_inst = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
1955  } else {
1956    if (fast_path) {
1957      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
1958        // We can have the linker fix up a relative call.
1959        call_inst =
1960          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
1961      } else {
1962        call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef),
1963                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
1964      }
1965    } else {
1966      // TODO: Extract?
1967      if (cu_->target64) {
1968        call_inst = GenInvokeNoInlineCall<8>(this, info->type);
1969      } else {
1970        call_inst = GenInvokeNoInlineCall<4>(this, info->type);
1971      }
1972    }
1973  }
1974  EndInvoke(info);
1975  MarkSafepointPC(call_inst);
1976
1977  ClobberCallerSave();
1978  if (info->result.location != kLocInvalid) {
1979    // We have a following MOVE_RESULT - do it now.
1980    if (info->result.wide) {
1981      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1982      StoreValueWide(info->result, ret_loc);
1983    } else {
1984      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1985      StoreValue(info->result, ret_loc);
1986    }
1987  }
1988}
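// The call_state / next_call_insn protocol used above: each call to next_call_insn()
// emits the next step of the ISA- and invoke-type-specific call sequence and returns
// the updated state, going negative once the sequence is complete (hence the
// "while (call_state >= 0)" drain loop), which allows argument loading to be
// interleaved with the call setup before the final call instruction is emitted.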
1989
1990}  // namespace art
1991