gen_invoke.cc revision 460503b13bc894828a2d2d47d09e5534b3e91aa1
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/class-inl.h"
26#include "mirror/dex_cache.h"
27#include "mirror/object_array-inl.h"
28#include "mirror/reference.h"
29#include "mirror/string.h"
30#include "mir_to_lir-inl.h"
31#include "scoped_thread_state_change.h"
32#include "x86/codegen_x86.h"
33
34namespace art {
35
36// Shortcuts to repeatedly used long types.
37typedef mirror::ObjectArray<mirror::Object> ObjArray;
38
39/*
40 * This source file contains "gen" codegen routines that should
41 * be applicable to most targets.  Only mid-level support utilities
42 * and "op" calls may be used here.
43 */
44
45void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
46  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
47   public:
48    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
49        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
50    }
51
52    void Compile() {
53      m2l_->ResetRegPool();
54      m2l_->ResetDefTracking();
55      GenerateTargetLabel(kPseudoIntrinsicRetry);
56      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
57      m2l_->GenInvokeNoInline(info_);
58      if (cont_ != nullptr) {
59        m2l_->OpUnconditionalBranch(cont_);
60      }
61    }
62
63   private:
64    CallInfo* const info_;
65  };
66
67  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
68}
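/*
 * Editor's sketch (not part of the original sources): a typical intrinsic pairs
 * AddIntrinsicSlowPath() with a compare-and-branch guarding its uncommon case.
 * The register names below are illustrative only.
 *
 *   LIR* range_check_branch = OpCmpBranch(kCondUge, reg_idx, reg_max, nullptr);
 *   // ... fast-path code ...
 *   LIR* resume = NewLIR0(kPseudoTargetLabel);
 *   AddIntrinsicSlowPath(info, range_check_branch, resume);
 *
 * The slow path re-issues the call through GenInvokeNoInline() and, because a
 * resume label was supplied, branches back to the fast-path continuation.
 */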
69
70// Macro to help instantiate.
71// TODO: This might be used to only instantiate <4> on pure 32b systems.
72#define INSTANTIATE(sig_part1, ...) \
73  template sig_part1(ThreadOffset<4>, __VA_ARGS__); \
74  template sig_part1(ThreadOffset<8>, __VA_ARGS__); \
75
76
77/*
78 * To save scheduling time, helper calls are broken into two parts: generation of
79 * the helper target address, and the actual call to the helper.  Because x86
80 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
81 * load arguments between the two parts.
82 */
83// template <size_t pointer_size>
84RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
85  // All CallRuntimeHelperXXX variants call this first, so make a central check here.
86  DCHECK_EQ(4U, GetInstructionSetPointerSize(cu_->instruction_set));
87
88  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
89    return RegStorage::InvalidReg();
90  } else {
91    return LoadHelper(helper_offset);
92  }
93}
94
95RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<8> helper_offset) {
96  // All CallRuntimeHelperXXX variants call this first, so make a central check here.
97  DCHECK_EQ(8U, GetInstructionSetPointerSize(cu_->instruction_set));
98
99  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
100    return RegStorage::InvalidReg();
101  } else {
102    return LoadHelper(helper_offset);
103  }
104}
105
106/* NOTE: if r_tgt is a temp, it will be freed following use */
107template <size_t pointer_size>
108LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset,
109                         bool safepoint_pc, bool use_link) {
110  LIR* call_inst;
111  OpKind op = use_link ? kOpBlx : kOpBx;
112  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
113    call_inst = OpThreadMem(op, helper_offset);
114  } else {
115    call_inst = OpReg(op, r_tgt);
116    FreeTemp(r_tgt);
117  }
118  if (safepoint_pc) {
119    MarkSafepointPC(call_inst);
120  }
121  return call_inst;
122}
123template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset,
124                                        bool safepoint_pc, bool use_link);
125template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<8> helper_offset,
126                                        bool safepoint_pc, bool use_link);
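/*
 * Editor's sketch (not part of the original sources): every CallRuntimeHelper*
 * wrapper below follows the two-part shape described above.
 *
 *   RegStorage r_tgt = CallHelperSetup(helper_offset);   // Part 1: target address
 *                                                        //         (no-op on x86).
 *   LoadConstant(TargetReg(kArg0, false), arg0);         // Marshal arguments.
 *   ClobberCallerSave();
 *   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);  // Part 2: call.
 */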
127
128template <size_t pointer_size>
129void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc) {
130  RegStorage r_tgt = CallHelperSetup(helper_offset);
131  ClobberCallerSave();
132  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
133}
134INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
135
136template <size_t pointer_size>
137void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc) {
138  RegStorage r_tgt = CallHelperSetup(helper_offset);
139  LoadConstant(TargetReg(kArg0, false), arg0);
140  ClobberCallerSave();
141  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
142}
143INSTANTIATE(void Mir2Lir::CallRuntimeHelperImm, int arg0, bool safepoint_pc)
144
145template <size_t pointer_size>
146void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
147                                   bool safepoint_pc) {
148  RegStorage r_tgt = CallHelperSetup(helper_offset);
149  OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
150  ClobberCallerSave();
151  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
152}
153INSTANTIATE(void Mir2Lir::CallRuntimeHelperReg, RegStorage arg0, bool safepoint_pc)
154
155template <size_t pointer_size>
156void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset,
157                                           RegLocation arg0, bool safepoint_pc) {
158  RegStorage r_tgt = CallHelperSetup(helper_offset);
159  if (arg0.wide == 0) {
160    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
161  } else {
162    RegStorage r_tmp;
163    if (cu_->target64) {
164      r_tmp = TargetReg(kArg0, true);
165    } else {
166      r_tmp = TargetReg(arg0.fp ? kFArg0 : kArg0, arg0.fp ? kFArg1 : kArg1);
167    }
168    LoadValueDirectWideFixed(arg0, r_tmp);
169  }
170  ClobberCallerSave();
171  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
172}
173INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocation, RegLocation arg0, bool safepoint_pc)
174
175template <size_t pointer_size>
176void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
177                                      bool safepoint_pc) {
178  RegStorage r_tgt = CallHelperSetup(helper_offset);
179  LoadConstant(TargetReg(kArg0, false), arg0);
180  LoadConstant(TargetReg(kArg1, false), arg1);
181  ClobberCallerSave();
182  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
183}
184INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmImm, int arg0, int arg1, bool safepoint_pc)
185
186template <size_t pointer_size>
187void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
188                                              RegLocation arg1, bool safepoint_pc) {
189  RegStorage r_tgt = CallHelperSetup(helper_offset);
190  if (arg1.wide == 0) {
191    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
192  } else {
193    RegStorage r_tmp;
194    if (cu_->target64) {
195      r_tmp = TargetReg(kArg1, true);
196    } else {
197      if (cu_->instruction_set == kMips) {
198        // skip kArg1 for stack alignment.
199        r_tmp = TargetReg(kArg2, kArg3);
200      } else {
201        r_tmp = TargetReg(kArg1, kArg2);
202      }
203    }
204    LoadValueDirectWideFixed(arg1, r_tmp);
205  }
206  LoadConstant(TargetReg(kArg0, false), arg0);
207  ClobberCallerSave();
208  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
209}
210INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocation, int arg0, RegLocation arg1,
211            bool safepoint_pc)
212
213template <size_t pointer_size>
214void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
215                                              RegLocation arg0, int arg1, bool safepoint_pc) {
216  RegStorage r_tgt = CallHelperSetup(helper_offset);
217  DCHECK(!arg0.wide);
218  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
219  LoadConstant(TargetReg(kArg1, false), arg1);
220  ClobberCallerSave();
221  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
222}
223INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationImm, RegLocation arg0, int arg1,
224            bool safepoint_pc)
225
226template <size_t pointer_size>
227void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
228                                      RegStorage arg1, bool safepoint_pc) {
229  RegStorage r_tgt = CallHelperSetup(helper_offset);
230  OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
231  LoadConstant(TargetReg(kArg0, false), arg0);
232  ClobberCallerSave();
233  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
234}
235INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmReg, int arg0, RegStorage arg1, bool safepoint_pc)
236
237template <size_t pointer_size>
238void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
239                                      int arg1, bool safepoint_pc) {
240  RegStorage r_tgt = CallHelperSetup(helper_offset);
241  OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
242  LoadConstant(TargetReg(kArg1, false), arg1);
243  ClobberCallerSave();
244  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
245}
246INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegImm, RegStorage arg0, int arg1, bool safepoint_pc)
247
248template <size_t pointer_size>
249void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
250                                         bool safepoint_pc) {
251  RegStorage r_tgt = CallHelperSetup(helper_offset);
252  LoadCurrMethodDirect(TargetRefReg(kArg1));
253  LoadConstant(TargetReg(kArg0, false), arg0);
254  ClobberCallerSave();
255  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
256}
257INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethod, int arg0, bool safepoint_pc)
258
259template <size_t pointer_size>
260void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
261                                         bool safepoint_pc) {
262  RegStorage r_tgt = CallHelperSetup(helper_offset);
263  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0));
264  if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) {
265    OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
266  }
267  LoadCurrMethodDirect(TargetRefReg(kArg1));
268  ClobberCallerSave();
269  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
270}
271INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethod, RegStorage arg0, bool safepoint_pc)
272
273template <size_t pointer_size>
274void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
275                                                    RegStorage arg0, RegLocation arg2,
276                                                    bool safepoint_pc) {
277  RegStorage r_tgt = CallHelperSetup(helper_offset);
278  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0));
279  if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) {
280    OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
281  }
282  LoadCurrMethodDirect(TargetRefReg(kArg1));
283  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
284  ClobberCallerSave();
285  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
286}
287INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethodRegLocation, RegStorage arg0, RegLocation arg2,
288            bool safepoint_pc)
289
290template <size_t pointer_size>
291void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
292                                                      RegLocation arg0, RegLocation arg1,
293                                                      bool safepoint_pc) {
294  RegStorage r_tgt = CallHelperSetup(helper_offset);
295  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
296    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
297
298    RegStorage arg1_reg;
299    if (arg1.fp == arg0.fp) {
300      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
301    } else {
302      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
303    }
304
305    if (arg0.wide == 0) {
306      LoadValueDirectFixed(arg0, arg0_reg);
307    } else {
308      LoadValueDirectWideFixed(arg0, arg0_reg);
309    }
310
311    if (arg1.wide == 0) {
312      LoadValueDirectFixed(arg1, arg1_reg);
313    } else {
314      LoadValueDirectWideFixed(arg1, arg1_reg);
315    }
316  } else {
317    DCHECK(!cu_->target64);
318    if (arg0.wide == 0) {
319      LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0, false) : TargetReg(kArg0, false));
320      if (arg1.wide == 0) {
321        if (cu_->instruction_set == kMips) {
322          LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg1, false));
323        } else {
324          LoadValueDirectFixed(arg1, TargetReg(kArg1, false));
325        }
326      } else {
327        if (cu_->instruction_set == kMips) {
328          RegStorage r_tmp;
329          if (arg1.fp) {
330            r_tmp = TargetReg(kFArg2, kFArg3);
331          } else {
332            // skip kArg1 for stack alignment.
333            r_tmp = TargetReg(kArg2, kArg3);
334          }
335          LoadValueDirectWideFixed(arg1, r_tmp);
336        } else {
337          RegStorage r_tmp;
338          r_tmp = TargetReg(kArg1, kArg2);
339          LoadValueDirectWideFixed(arg1, r_tmp);
340        }
341      }
342    } else {
343      RegStorage r_tmp;
344      if (arg0.fp) {
345        r_tmp = TargetReg(kFArg0, kFArg1);
346      } else {
347        r_tmp = TargetReg(kArg0, kArg1);
348      }
349      LoadValueDirectWideFixed(arg0, r_tmp);
350      if (arg1.wide == 0) {
351        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg2, false));
352      } else {
353        RegStorage r_tmp;
354        if (arg1.fp) {
355          r_tmp = TargetReg(kFArg2, kFArg3);
356        } else {
357          r_tmp = TargetReg(kArg2, kArg3);
358        }
359        LoadValueDirectWideFixed(arg1, r_tmp);
360      }
361    }
362  }
363  ClobberCallerSave();
364  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
365}
366INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
367            RegLocation arg1, bool safepoint_pc)
368
369void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
370  if (IsSameReg(arg1, TargetReg(kArg0, arg1.Is64Bit()))) {
371    if (IsSameReg(arg0, TargetReg(kArg1, arg0.Is64Bit()))) {
372      // Swap kArg0 and kArg1 with kArg2 as temp.
373      OpRegCopy(TargetReg(kArg2, arg1.Is64Bit()), arg1);
374      OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
375      OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2, arg1.Is64Bit()));
376    } else {
377      OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
378      OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
379    }
380  } else {
381    OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
382    OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
383  }
384}
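/*
 * Editor's note (not part of the original sources): worked example of the
 * clobber-avoiding order above, for the full-swap case where arg0 is already
 * in kArg1 and arg1 is already in kArg0:
 *
 *   kArg2 <- kArg0   (park arg1)
 *   kArg0 <- kArg1   (place arg0)
 *   kArg1 <- kArg2   (place arg1)
 *
 * If only arg1 aliases kArg0, filling kArg1 before kArg0 is sufficient.
 */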
385
386template <size_t pointer_size>
387void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
388                                      RegStorage arg1, bool safepoint_pc) {
389  RegStorage r_tgt = CallHelperSetup(helper_offset);
390  CopyToArgumentRegs(arg0, arg1);
391  ClobberCallerSave();
392  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
393}
394INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegReg, RegStorage arg0, RegStorage arg1,
395            bool safepoint_pc)
396
397template <size_t pointer_size>
398void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
399                                         RegStorage arg1, int arg2, bool safepoint_pc) {
400  RegStorage r_tgt = CallHelperSetup(helper_offset);
401  CopyToArgumentRegs(arg0, arg1);
402  LoadConstant(TargetReg(kArg2, false), arg2);
403  ClobberCallerSave();
404  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
405}
406INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegRegImm, RegStorage arg0, RegStorage arg1, int arg2,
407            bool safepoint_pc)
408
409template <size_t pointer_size>
410void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
411                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
412  RegStorage r_tgt = CallHelperSetup(helper_offset);
413  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
414  LoadCurrMethodDirect(TargetRefReg(kArg1));
415  LoadConstant(TargetReg(kArg0, false), arg0);
416  ClobberCallerSave();
417  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
418}
419INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodRegLocation, int arg0, RegLocation arg2,
420            bool safepoint_pc)
421
422template <size_t pointer_size>
423void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
424                                            int arg2, bool safepoint_pc) {
425  RegStorage r_tgt = CallHelperSetup(helper_offset);
426  LoadCurrMethodDirect(TargetRefReg(kArg1));
427  LoadConstant(TargetReg(kArg2, false), arg2);
428  LoadConstant(TargetReg(kArg0, false), arg0);
429  ClobberCallerSave();
430  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
431}
432INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodImm, int arg0, int arg2, bool safepoint_pc)
433
434template <size_t pointer_size>
435void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
436                                                         int arg0, RegLocation arg1,
437                                                         RegLocation arg2, bool safepoint_pc) {
438  RegStorage r_tgt = CallHelperSetup(helper_offset);
439  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
440                                                        // instantiation bug in GCC.
441  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
442  if (arg2.wide == 0) {
443    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
444  } else {
445    RegStorage r_tmp;
446    if (cu_->target64) {
447      r_tmp = TargetReg(kArg2, true);
448    } else {
449      r_tmp = TargetReg(kArg2, kArg3);
450    }
451    LoadValueDirectWideFixed(arg2, r_tmp);
452  }
453  LoadConstant(TargetReg(kArg0, false), arg0);
454  ClobberCallerSave();
455  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
456}
457INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg1,
458            RegLocation arg2, bool safepoint_pc)
459
460template <size_t pointer_size>
461void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
462                                                                 RegLocation arg0, RegLocation arg1,
463                                                                 RegLocation arg2,
464                                                                 bool safepoint_pc) {
465  RegStorage r_tgt = CallHelperSetup(helper_offset);
466  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
467  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
468  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
469  ClobberCallerSave();
470  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
471}
472INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation, RegLocation arg0,
473            RegLocation arg1, RegLocation arg2, bool safepoint_pc)
474
475/*
476 * If there are any ins passed in registers that have not been promoted
477 * to a callee-save register, flush them to the frame.  Perform initial
478 * assignment of promoted arguments.
479 *
480 * ArgLocs is an array of location records describing the incoming arguments
481 * with one location record per word of argument.
482 */
483void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
484  /*
485   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>.
486   * It will attempt to keep kArg0 live (or copy it to home location
487   * if promoted).
488   */
489  RegLocation rl_src = rl_method;
490  rl_src.location = kLocPhysReg;
491  rl_src.reg = TargetRefReg(kArg0);
492  rl_src.home = false;
493  MarkLive(rl_src);
494  StoreValue(rl_method, rl_src);
495  // If Method* has been promoted, explicitly flush
496  if (rl_method.location == kLocPhysReg) {
497    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
498  }
499
500  if (cu_->num_ins == 0) {
501    return;
502  }
503
504  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
505  /*
506   * Copy incoming arguments to their proper home locations.
507   * NOTE: an older version of dx had an issue in which
508   * it would reuse static method argument registers.
509   * This could result in the same Dalvik virtual register
510   * being promoted to both core and fp regs. To account for this,
511   * we only copy to the corresponding promoted physical register
512   * if it matches the type of the SSA name for the incoming
513   * argument.  It is also possible that long and double arguments
514   * end up half-promoted.  In those cases, we must flush the promoted
515   * half to memory as well.
516   */
517  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
518  for (int i = 0; i < cu_->num_ins; i++) {
519    PromotionMap* v_map = &promotion_map_[start_vreg + i];
520    RegStorage reg = GetArgMappingToPhysicalReg(i);
521
522    if (reg.Valid()) {
523      // If arriving in register
524      bool need_flush = true;
525      RegLocation* t_loc = &ArgLocs[i];
526      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
527        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
528        need_flush = false;
529      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
530        OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
531        need_flush = false;
532      } else {
533        need_flush = true;
534      }
535
536      // For wide args, force flush if not fully promoted
537      if (t_loc->wide) {
538        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
539        // Is only half promoted?
540        need_flush |= (p_map->core_location != v_map->core_location) ||
541            (p_map->fp_location != v_map->fp_location);
542        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
543          /*
544           * In Arm, a double is represented as a pair of consecutive single float
545           * registers starting at an even number.  It's possible that both Dalvik vRegs
546           * representing the incoming double were independently promoted as singles - but
547           * not in a form usable as a double.  If so, we need to flush - even though the
548           * incoming arg appears fully in register.  At this point in the code, both
549           * halves of the double are promoted.  Make sure they are in a usable form.
550           */
551          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
552          int low_reg = promotion_map_[lowreg_index].fp_reg;
553          int high_reg = promotion_map_[lowreg_index + 1].fp_reg;
554          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
555            need_flush = true;
556          }
557        }
558      }
559      if (need_flush) {
560        Store32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg);
561      }
562    } else {
563      // If arriving in frame & promoted
564      if (v_map->core_location == kLocPhysReg) {
565        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
566      }
567      if (v_map->fp_location == kLocPhysReg) {
568        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg));
569      }
570    }
571  }
572}
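/*
 * Editor's note (not part of the original sources): example of the Thumb2
 * half-promotion check above.  Suppose a double arrives in (v4, v5) and the
 * promotion pass assigned fp_reg 3 to v4 and fp_reg 5 to v5.  Then low_reg is
 * odd and high_reg != low_reg + 1, so the pair cannot be viewed as a single D
 * register and both halves are flushed to the frame instead.
 */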
573
574/*
575 * Bit of a hack here - in the absence of a real scheduling pass,
576 * emit the next instruction in static & direct invoke sequences.
577 */
578static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
579                          int state, const MethodReference& target_method,
580                          uint32_t unused,
581                          uintptr_t direct_code, uintptr_t direct_method,
582                          InvokeType type) {
583  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
584  if (direct_code != 0 && direct_method != 0) {
585    switch (state) {
586    case 0:  // Get the current Method* [sets kArg0]
587      if (direct_code != static_cast<uintptr_t>(-1)) {
588        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
589          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
590        }
591      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
592        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
593      }
594      if (direct_method != static_cast<uintptr_t>(-1)) {
595        cg->LoadConstant(cg->TargetRefReg(kArg0), direct_method);
596      } else {
597        cg->LoadMethodAddress(target_method, type, kArg0);
598      }
599      break;
600    default:
601      return -1;
602    }
603  } else {
604    RegStorage arg0_ref = cg->TargetRefReg(kArg0);
605    switch (state) {
606    case 0:  // Get the current Method* [sets kArg0]
607      // TUNING: we can save a reg copy if Method* has been promoted.
608      cg->LoadCurrMethodDirect(arg0_ref);
609      break;
610    case 1:  // Get method->dex_cache_resolved_methods_
611      cg->LoadRefDisp(arg0_ref,
612                      mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
613                      arg0_ref,
614                      kNotVolatile);
615      // Set up direct code if known.
616      if (direct_code != 0) {
617        if (direct_code != static_cast<uintptr_t>(-1)) {
618          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
619        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
620          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
621          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
622        }
623      }
624      break;
625    case 2:  // Grab target method*
626      CHECK_EQ(cu->dex_file, target_method.dex_file);
627      cg->LoadRefDisp(arg0_ref,
628                      ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
629                      arg0_ref,
630                      kNotVolatile);
631      break;
632    case 3:  // Grab the code from the method*
633      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
634        if (direct_code == 0) {
635          cg->LoadWordDisp(arg0_ref,
636                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
637                           cg->TargetPtrReg(kInvokeTgt));
638        }
639        break;
640      }
641      // Intentional fallthrough for x86
642    default:
643      return -1;
644    }
645  }
646  return state + 1;
647}
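/*
 * Editor's sketch (not part of the original sources): the NextCallInsn helpers
 * are driven as a small state machine by GenDalvikArgsNoRange()/
 * GenDalvikArgsRange(), which interleave argument setup with the invoke
 * sequence roughly like:
 *
 *   int call_state = 0;
 *   while (there is argument setup left to emit) {
 *     EmitSomeArgumentMoves();   // hypothetical placeholder
 *     call_state = next_call_insn(cu_, info, call_state, target_method,
 *                                 vtable_idx, direct_code, direct_method, type);
 *   }
 *   // next_call_insn() returns -1 once its sequence is complete.
 */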
648
649/*
650 * Bit of a hack here - in the absence of a real scheduling pass,
651 * emit the next instruction in a virtual invoke sequence.
652 * We can use kLr as a temp prior to target address loading.
653 * Note also that we'll load the first argument ("this") into
654 * kArg1 here rather than the standard LoadArgRegs.
655 */
656static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
657                         int state, const MethodReference& target_method,
658                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
659                         InvokeType unused3) {
660  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
661  /*
662   * This is the fast path in which the target virtual method is
663   * fully resolved at compile time.
664   */
665  switch (state) {
666    case 0: {  // Get "this" [set kArg1]
667      RegLocation  rl_arg = info->args[0];
668      cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1));
669      break;
670    }
671    case 1:  // Is "this" null? [use kArg1]
672      cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags);
673      // get this->klass_ [use kArg1, set kInvokeTgt]
674      cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
675                      cg->TargetPtrReg(kInvokeTgt),
676                      kNotVolatile);
677      cg->MarkPossibleNullPointerException(info->opt_flags);
678      break;
679    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
680      cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
681                      cg->TargetPtrReg(kInvokeTgt),
682                      kNotVolatile);
683      break;
684    case 3:  // Get target method [use kInvokeTgt, set kArg0]
685      cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt),
686                      ObjArray::OffsetOfElement(method_idx).Int32Value(),
687                      cg->TargetRefReg(kArg0),
688                      kNotVolatile);
689      break;
690    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
691      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
692        cg->LoadWordDisp(cg->TargetRefReg(kArg0),
693                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
694                         cg->TargetPtrReg(kInvokeTgt));
695        break;
696      }
697      // Intentional fallthrough for X86
698    default:
699      return -1;
700  }
701  return state + 1;
702}
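/*
 * Editor's note (not part of the original sources): the state machine above
 * expands to roughly the following load sequence (pseudo-code):
 *
 *   kArg1      <- this                                  // state 0
 *   null-check kArg1                                    // state 1
 *   kInvokeTgt <- [kArg1 + Object::klass_]              // state 1
 *   kInvokeTgt <- [kInvokeTgt + Class::vtable_]         // state 2
 *   kArg0      <- [kInvokeTgt + vtable slot method_idx] // state 3
 *   kInvokeTgt <- [kArg0 + quick entry point]           // state 4 (non-x86)
 */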
703
704/*
705 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
706 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
707 * more than one interface method maps to the same index. Note also that we'll load the first
708 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
709 */
710static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
711                                 const MethodReference& target_method,
712                                 uint32_t method_idx, uintptr_t unused,
713                                 uintptr_t direct_method, InvokeType unused2) {
714  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
715
716  switch (state) {
717    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
718      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
719      cg->LoadConstant(cg->TargetReg(kHiddenArg, false), target_method.dex_method_index);
720      if (cu->instruction_set == kX86) {
721        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, false), cg->TargetReg(kHiddenArg, false));
722      }
723      break;
724    case 1: {  // Get "this" [set kArg1]
725      RegLocation  rl_arg = info->args[0];
726      cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1));
727      break;
728    }
729    case 2:  // Is "this" null? [use kArg1]
730      cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags);
731      // Get this->klass_ [use kArg1, set kInvokeTgt]
732      cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
733                      cg->TargetPtrReg(kInvokeTgt),
734                      kNotVolatile);
735      cg->MarkPossibleNullPointerException(info->opt_flags);
736      break;
737    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
738      // NOTE: native pointer.
739      cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
740                      cg->TargetPtrReg(kInvokeTgt),
741                      kNotVolatile);
742      break;
743    case 4:  // Get target method [use kInvokeTgt, set kArg0]
744      // NOTE: native pointer.
745      cg->LoadRefDisp(cg->TargetPtrReg(kInvokeTgt),
746                       ObjArray::OffsetOfElement(method_idx % ClassLinker::kImtSize).Int32Value(),
747                       cg->TargetRefReg(kArg0),
748                       kNotVolatile);
749      break;
750    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
751      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
752        cg->LoadWordDisp(cg->TargetRefReg(kArg0),
753                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
754                         cg->TargetPtrReg(kInvokeTgt));
755        break;
756      }
757      // Intentional fallthrough for X86
758    default:
759      return -1;
760  }
761  return state + 1;
762}
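/*
 * Editor's note (not part of the original sources): interface dispatch differs
 * from the virtual case above in two ways.  The IMT slot is selected by
 * method_idx % ClassLinker::kImtSize, so distinct interface methods may collide
 * on one slot, and the dex method index is preloaded into kHiddenArg
 * (kHiddenFpArg on x86) so that art_quick_imt_conflict_trampoline can resolve
 * the collision at runtime when the slot holds the conflict method.
 */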
763
764template <size_t pointer_size>
765static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<pointer_size> trampoline,
766                            int state, const MethodReference& target_method,
767                            uint32_t method_idx) {
768  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
769  /*
770   * This handles the case in which the base method is not fully
771   * resolved at compile time; we bail to a runtime helper.
772   */
773  if (state == 0) {
774    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
775      // Load trampoline target
776      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(), cg->TargetPtrReg(kInvokeTgt));
777    }
778    // Load kArg0 with method index
779    CHECK_EQ(cu->dex_file, target_method.dex_file);
780    cg->LoadConstant(cg->TargetReg(kArg0, false), target_method.dex_method_index);
781    return 1;
782  }
783  return -1;
784}
785
786static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
787                                int state,
788                                const MethodReference& target_method,
789                                uint32_t unused, uintptr_t unused2,
790                                uintptr_t unused3, InvokeType unused4) {
791  if (cu->target64) {
792    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
793    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
794  } else {
795    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
796    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
797  }
798}
799
800static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
801                                const MethodReference& target_method,
802                                uint32_t unused, uintptr_t unused2,
803                                uintptr_t unused3, InvokeType unused4) {
804  if (cu->target64) {
805    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
806    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
807  } else {
808    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
809    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
810  }
811}
812
813static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
814                               const MethodReference& target_method,
815                               uint32_t unused, uintptr_t unused2,
816                               uintptr_t unused3, InvokeType unused4) {
817  if (cu->target64) {
818    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
819    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
820  } else {
821    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
822    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
823  }
824}
825
826static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
827                           const MethodReference& target_method,
828                           uint32_t unused, uintptr_t unused2,
829                           uintptr_t unused3, InvokeType unused4) {
830  if (cu->target64) {
831    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeVirtualTrampolineWithAccessCheck);
832    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
833  } else {
834    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck);
835    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
836  }
837}
838
839static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
840                                                CallInfo* info, int state,
841                                                const MethodReference& target_method,
842                                                uint32_t unused, uintptr_t unused2,
843                                                uintptr_t unused3, InvokeType unused4) {
844  if (cu->target64) {
845      ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeInterfaceTrampolineWithAccessCheck);
846      return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
847    } else {
848      ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck);
849      return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
850    }
851}
852
853int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
854                         NextCallInsn next_call_insn,
855                         const MethodReference& target_method,
856                         uint32_t vtable_idx, uintptr_t direct_code,
857                         uintptr_t direct_method, InvokeType type, bool skip_this) {
858  int last_arg_reg = 3 - 1;
859  int arg_regs[3] = {TargetReg(kArg1, false).GetReg(), TargetReg(kArg2, false).GetReg(), TargetReg(kArg3, false).GetReg()};
860
861  int next_reg = 0;
862  int next_arg = 0;
863  if (skip_this) {
864    next_reg++;
865    next_arg++;
866  }
867  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
868    RegLocation rl_arg = info->args[next_arg++];
869    rl_arg = UpdateRawLoc(rl_arg);
870    if (rl_arg.wide && (next_reg <= last_arg_reg - 1)) {
871      RegStorage r_tmp(RegStorage::k64BitPair, arg_regs[next_reg], arg_regs[next_reg + 1]);
872      LoadValueDirectWideFixed(rl_arg, r_tmp);
873      next_reg++;
874      next_arg++;
875    } else {
876      if (rl_arg.wide) {
877        rl_arg = NarrowRegLoc(rl_arg);
878        rl_arg.is_const = false;
879      }
880      LoadValueDirectFixed(rl_arg, RegStorage::Solo32(arg_regs[next_reg]));
881    }
882    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
883                                direct_code, direct_method, type);
884  }
885  return call_state;
886}
887
888/*
889 * Load up to 5 arguments, the first three of which will be in
890 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
891 * and as part of the load sequence, it must be replaced with
892 * the target method pointer.  Note, this may also be called
893 * for "range" variants if the number of arguments is 5 or fewer.
894 */
895int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
896                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
897                                  const MethodReference& target_method,
898                                  uint32_t vtable_idx, uintptr_t direct_code,
899                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
900  RegLocation rl_arg;
901
902  /* If no arguments, just return */
903  if (info->num_arg_words == 0)
904    return call_state;
905
906  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
907                              direct_code, direct_method, type);
908
909  DCHECK_LE(info->num_arg_words, 5);
910  if (info->num_arg_words > 3) {
911    int32_t next_use = 3;
912    // Detect special case of wide arg spanning arg3/arg4
913    RegLocation rl_use0 = info->args[0];
914    RegLocation rl_use1 = info->args[1];
915    RegLocation rl_use2 = info->args[2];
916    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
917      RegStorage reg;
918      // Wide arg spans arg3/arg4; we need the 2nd half of uses[2].
919      rl_arg = UpdateLocWide(rl_use2);
920      if (rl_arg.location == kLocPhysReg) {
921        if (rl_arg.reg.IsPair()) {
922          reg = rl_arg.reg.GetHigh();
923        } else {
924          RegisterInfo* info = GetRegInfo(rl_arg.reg);
925          info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
926          if (info == nullptr) {
927            // NOTE: For hard float convention we won't split arguments across reg/mem.
928            UNIMPLEMENTED(FATAL) << "Needs hard float api.";
929          }
930          reg = info->GetReg();
931        }
932      } else {
933        // kArg2 & kArg3 can safely be used here
934        reg = TargetReg(kArg3, false);
935        {
936          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
937          Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
938        }
939        call_state = next_call_insn(cu_, info, call_state, target_method,
940                                    vtable_idx, direct_code, direct_method, type);
941      }
942      {
943        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
944        Store32Disp(TargetPtrReg(kSp), (next_use + 1) * 4, reg);
945      }
946      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
947                                  direct_code, direct_method, type);
948      next_use++;
949    }
950    // Loop through the rest
951    while (next_use < info->num_arg_words) {
952      RegStorage arg_reg;
953      rl_arg = info->args[next_use];
954      rl_arg = UpdateRawLoc(rl_arg);
955      if (rl_arg.location == kLocPhysReg) {
956        arg_reg = rl_arg.reg;
957      } else {
958        arg_reg = rl_arg.wide ? TargetReg(kArg2, kArg3) : TargetReg(kArg2, false);
959        if (rl_arg.wide) {
960          LoadValueDirectWideFixed(rl_arg, arg_reg);
961        } else {
962          LoadValueDirectFixed(rl_arg, arg_reg);
963        }
964        call_state = next_call_insn(cu_, info, call_state, target_method,
965                                    vtable_idx, direct_code, direct_method, type);
966      }
967      int outs_offset = (next_use + 1) * 4;
968      {
969        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
970        if (rl_arg.wide) {
971          StoreBaseDisp(TargetPtrReg(kSp), outs_offset, arg_reg, k64, kNotVolatile);
972          next_use += 2;
973        } else {
974          Store32Disp(TargetPtrReg(kSp), outs_offset, arg_reg);
975          next_use++;
976        }
977      }
978      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
979                               direct_code, direct_method, type);
980    }
981  }
982
983  call_state = LoadArgRegs(info, call_state, next_call_insn,
984                           target_method, vtable_idx, direct_code, direct_method,
985                           type, skip_this);
986
987  if (pcrLabel) {
988    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
989      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
990    } else {
991      *pcrLabel = nullptr;
992      // In lieu of generating a check for kArg1 being null, we need to
993      // perform a load when doing implicit checks.
994      RegStorage tmp = AllocTemp();
995      Load32Disp(TargetRefReg(kArg1), 0, tmp);
996      MarkPossibleNullPointerException(info->opt_flags);
997      FreeTemp(tmp);
998    }
999  }
1000  return call_state;
1001}
1002
1003/*
1004 * May have 0+ arguments (also used for jumbo).  Note that
1005 * source virtual registers may be in physical registers, so may
1006 * need to be flushed to home location before copying.  This
1007 * applies to arg3 and above (see below).
1008 *
1009 * Two general strategies:
1010 *    If < 20 arguments
1011 *       Pass args 3-18 using vldm/vstm block copy
1012 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
1013 *    If 20+ arguments
1014 *       Pass args arg19+ using memcpy block copy
1015 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
1016 *
1017 */
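/*
 * Editor's sketch (not part of the original sources): resulting out-argument
 * layout for the range case, given that Method* lives at [sp + 0]:
 *
 *   arg0 .. arg2   -> kArg1 .. kArg3 (via LoadArgRegs)
 *   arg3 .. argN   -> [sp + 16], [sp + 20], ...
 *
 * i.e. start_offset below is the home location of arg3 and outs_offset is
 * 4 (Method*) + 12 (the three register arguments).
 */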
1018int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
1019                                LIR** pcrLabel, NextCallInsn next_call_insn,
1020                                const MethodReference& target_method,
1021                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
1022                                InvokeType type, bool skip_this) {
1023  // If we can treat it as non-range (Jumbo ops will use range form)
1024  if (info->num_arg_words <= 5)
1025    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
1026                                next_call_insn, target_method, vtable_idx,
1027                                direct_code, direct_method, type, skip_this);
1028  /*
1029   * First load the non-register arguments.  Both forms expect all
1030   * of the source arguments to be in their home frame location, so
1031   * scan the s_reg names and flush any that have been promoted to
1032   * frame backing storage.
1033   */
1034  // Scan the rest of the args - if in phys_reg flush to memory
1035  for (int next_arg = 0; next_arg < info->num_arg_words;) {
1036    RegLocation loc = info->args[next_arg];
1037    if (loc.wide) {
1038      loc = UpdateLocWide(loc);
1039      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
1040        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1041        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
1042      }
1043      next_arg += 2;
1044    } else {
1045      loc = UpdateLoc(loc);
1046      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
1047        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1048        Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
1049      }
1050      next_arg++;
1051    }
1052  }
1053
1054  // Logic below assumes that Method pointer is at offset zero from SP.
1055  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
1056
1057  // The first 3 arguments are passed via registers.
1058  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
1059  // get size of uintptr_t or size of object reference according to model being used.
1060  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
1061  int start_offset = SRegOffset(info->args[3].s_reg_low);
1062  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
1063  DCHECK_GT(regs_left_to_pass_via_stack, 0);
1064
1065  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
1066    // Use vldm/vstm pair using kArg3 as a temp
1067    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1068                             direct_code, direct_method, type);
1069    OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), start_offset);
1070    LIR* ld = nullptr;
1071    {
1072      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1073      ld = OpVldm(TargetRefReg(kArg3), regs_left_to_pass_via_stack);
1074    }
1075    // TUNING: loosen barrier
1076    ld->u.m.def_mask = &kEncodeAll;
1077    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1078                             direct_code, direct_method, type);
1079    OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4));
1080    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1081                             direct_code, direct_method, type);
1082    LIR* st = nullptr;
1083    {
1084      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1085      st = OpVstm(TargetRefReg(kArg3), regs_left_to_pass_via_stack);
1086    }
1087    st->u.m.def_mask = &kEncodeAll;
1088    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1089                             direct_code, direct_method, type);
1090  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1091    int current_src_offset = start_offset;
1092    int current_dest_offset = outs_offset;
1093
1094    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
1095    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1096    while (regs_left_to_pass_via_stack > 0) {
1097      // This is based on the knowledge that the stack itself is 16-byte aligned.
1098      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
1099      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
1100      size_t bytes_to_move;
1101
1102      /*
1103       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
1104       * a 128-bit move because we won't get another chance to align. If there are more than 4
1105       * registers left to move, consider doing a 128-bit move only if either src or dest is aligned.
1106       * We do this because we could potentially do a smaller move to align.
1107       */
1108      if (regs_left_to_pass_via_stack == 4 ||
1109          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
1110        // Moving 128-bits via xmm register.
1111        bytes_to_move = sizeof(uint32_t) * 4;
1112
1113        // Allocate a free xmm temp. Since we are working through the calling sequence,
1114        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
1115        // there are no free registers.
1116        RegStorage temp = AllocTempDouble();
1117
1118        LIR* ld1 = nullptr;
1119        LIR* ld2 = nullptr;
1120        LIR* st1 = nullptr;
1121        LIR* st2 = nullptr;
1122
1123        /*
1124         * The logic is similar for both loads and stores. If we have 16-byte alignment,
1125         * do an aligned move. If we have 8-byte alignment, then do the move in two
1126         * parts. This approach prevents possible cache line splits. Finally, fall back
1127         * to doing an unaligned move. In most cases we likely won't split the cache
1128         * line but we cannot prove it and thus take a conservative approach.
1129         */
1130        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
1131        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
1132
1133        if (src_is_16b_aligned) {
1134          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP);
1135        } else if (src_is_8b_aligned) {
1136          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP);
1137          ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1),
1138                            kMovHi128FP);
1139        } else {
1140          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP);
1141        }
1142
1143        if (dest_is_16b_aligned) {
1144          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP);
1145        } else if (dest_is_8b_aligned) {
1146          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP);
1147          st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1),
1148                            temp, kMovHi128FP);
1149        } else {
1150          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP);
1151        }
1152
1153        // TODO If we could keep track of aliasing information for memory accesses that are wider
1154        // than 64-bit, we wouldn't need to set up a barrier.
1155        if (ld1 != nullptr) {
1156          if (ld2 != nullptr) {
1157            // For 64-bit load we can actually set up the aliasing information.
1158            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
1159            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
1160          } else {
1161            // Set barrier for 128-bit load.
1162            ld1->u.m.def_mask = &kEncodeAll;
1163          }
1164        }
1165        if (st1 != nullptr) {
1166          if (st2 != nullptr) {
1167            // For 64-bit store we can actually set up the aliasing information.
1168            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
1169            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
1170          } else {
1171            // Set barrier for 128-bit store.
1172            st1->u.m.def_mask = &kEncodeAll;
1173          }
1174        }
1175
1176        // Free the temporary used for the data movement.
1177        FreeTemp(temp);
1178      } else {
1179        // Moving 32-bits via general purpose register.
1180        bytes_to_move = sizeof(uint32_t);
1181
1182        // Instead of allocating a new temp, simply reuse one of the registers being used
1183        // for argument passing.
1184        RegStorage temp = TargetReg(kArg3, false);
1185
1186        // Now load the argument VR and store to the outs.
1187        Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
1188        Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
1189      }
1190
1191      current_src_offset += bytes_to_move;
1192      current_dest_offset += bytes_to_move;
1193      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
1194    }
1195  } else {
1196    // Generate memcpy
1197    OpRegRegImm(kOpAdd, TargetRefReg(kArg0), TargetPtrReg(kSp), outs_offset);
1198    OpRegRegImm(kOpAdd, TargetRefReg(kArg1), TargetPtrReg(kSp), start_offset);
1199    if (cu_->target64) {
1200      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetRefReg(kArg0),
1201                                 TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false);
1202    } else {
1203      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetRefReg(kArg0),
1204                                 TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false);
1205    }
1206  }
1207
1208  call_state = LoadArgRegs(info, call_state, next_call_insn,
1209                           target_method, vtable_idx, direct_code, direct_method,
1210                           type, skip_this);
1211
1212  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1213                           direct_code, direct_method, type);
1214  if (pcrLabel) {
1215    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
1216      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
1217    } else {
1218      *pcrLabel = nullptr;
1219      // In lieu of generating a check for kArg1 being null, we need to
1220      // perform a load when doing implicit checks.
1221      RegStorage tmp = AllocTemp();
1222      Load32Disp(TargetRefReg(kArg1), 0, tmp);
1223      MarkPossibleNullPointerException(info->opt_flags);
1224      FreeTemp(tmp);
1225    }
1226  }
1227  return call_state;
1228}
1229
1230RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
1231  RegLocation res;
1232  if (info->result.location == kLocInvalid) {
1233    res = GetReturn(LocToRegClass(info->result));
1234  } else {
1235    res = info->result;
1236  }
1237  return res;
1238}
1239
1240RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
1241  RegLocation res;
1242  if (info->result.location == kLocInvalid) {
1243    res = GetReturnWide(kCoreReg);
1244  } else {
1245    res = info->result;
1246  }
1247  return res;
1248}
1249
1250bool Mir2Lir::GenInlinedGet(CallInfo* info) {
1251  if (cu_->instruction_set == kMips) {
1252    // TODO - add Mips implementation
1253    return false;
1254  }
1255
1256  // The Reference class is stored in the image dex file, which might not be the same as the cu's
1257  // dex file. Query the Reference class against the image dex file, then reset to the original
1258  // dex file after loading the class type.
1259  uint16_t type_idx = 0;
1260  const DexFile* ref_dex_file = nullptr;
1261  {
1262    ScopedObjectAccess soa(Thread::Current());
1263    type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
1264    ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
1265  }
1266  CHECK(LIKELY(ref_dex_file != nullptr));
1267
1268  // The address is either static within the image file, or needs to be patched up after compilation.
1269  bool unused_type_initialized;
1270  bool use_direct_type_ptr;
1271  uintptr_t direct_type_ptr;
1272  bool is_finalizable;
1273  const DexFile* old_dex = cu_->dex_file;
1274  cu_->dex_file = ref_dex_file;
1275  if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
1276                                                &use_direct_type_ptr, &direct_type_ptr,
1277                                                &is_finalizable) || is_finalizable) {
1278    cu_->dex_file = old_dex;
1279    // The address is not known and a post-compile patch is not possible; cannot insert the intrinsic.
1280    return false;
1281  }
1282  if (use_direct_type_ptr) {
1283    LoadConstant(TargetReg(kArg1), direct_type_ptr);
1284  } else {
1285    LoadClassType(type_idx, kArg1);
1286  }
1287  cu_->dex_file = old_dex;
1288
1289  // Intrinsic logic starts here.
1290  RegLocation rl_obj = info->args[0];
1291  rl_obj = LoadValue(rl_obj);
1292
1293  RegStorage reg_class = TargetReg(kArg1, cu_->target64);
1294  RegStorage reg_slow_path = AllocTemp();
1295  RegStorage reg_disabled = AllocTemp();
1296  Load32Disp(reg_class, mirror::ReferenceClass::SlowPathEnabledOffset().Int32Value(),
1297      reg_slow_path);
1298  Load32Disp(reg_class, mirror::ReferenceClass::DisableIntrinsicOffset().Int32Value(),
1299      reg_disabled);
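  // OR the two flags together so that a single compare-and-branch below covers both the
  // "slow path enabled" and "intrinsic disabled" cases.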
1300  OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
1301  FreeTemp(reg_disabled);
1302
1303  // If the slow path is required, jump to the JNI path target.
1304  LIR* slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
1305  FreeTemp(reg_slow_path);
1306
1307  // Slow path not enabled: simply load the referent of the reference object.
1308  RegLocation rl_dest = InlineTarget(info);
1309  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1310  GenNullCheck(rl_obj.reg, info->opt_flags);
1311  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
1312      kNotVolatile);
1313  MarkPossibleNullPointerException(info->opt_flags);
1314  StoreValue(rl_dest, rl_result);
1315  LIR* jump_finished = OpUnconditionalBranch(nullptr);
1316
1317  // JNI target
1318  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
1319  slow_path_branch->target = slow_path_target;
1320  ResetRegPool();
1321  GenInvokeNoInline(info);
1322
1323  LIR* finished_target = NewLIR0(kPseudoTargetLabel);
1324  jump_finished->target = finished_target;
1325
1326  return true;
1327}
1328
1329bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
1330  if (cu_->instruction_set == kMips) {
1331    // TODO - add Mips implementation
1332    return false;
1333  }
1334  // Location of reference to data array
1335  int value_offset = mirror::String::ValueOffset().Int32Value();
1336  // Location of count
1337  int count_offset = mirror::String::CountOffset().Int32Value();
1338  // Starting offset within data array
1339  int offset_offset = mirror::String::OffsetOffset().Int32Value();
1340  // Start of char data within array_
1341  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
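  // With this layout the char at index i lives at value + data_offset + 2 * (offset + i);
  // the code below accumulates (offset + i) in reg_off and indexes off the value array in reg_ptr.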
1342
1343  RegLocation rl_obj = info->args[0];
1344  RegLocation rl_idx = info->args[1];
1345  rl_obj = LoadValue(rl_obj, kRefReg);
1346  // X86 wants to avoid putting a constant index into a register.
1347  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
1348    rl_idx = LoadValue(rl_idx, kCoreReg);
1349  }
1350  RegStorage reg_max;
1351  GenNullCheck(rl_obj.reg, info->opt_flags);
1352  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1353  LIR* range_check_branch = nullptr;
1354  RegStorage reg_off;
1355  RegStorage reg_ptr;
1356  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1357    reg_off = AllocTemp();
1358    reg_ptr = AllocTempRef();
1359    if (range_check) {
1360      reg_max = AllocTemp();
1361      Load32Disp(rl_obj.reg, count_offset, reg_max);
1362      MarkPossibleNullPointerException(info->opt_flags);
1363    }
1364    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1365    MarkPossibleNullPointerException(info->opt_flags);
1366    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1367    if (range_check) {
1368      // Set up a slow path to allow retry in case of bounds violation.
1369      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1370      FreeTemp(reg_max);
1371      range_check_branch = OpCondBranch(kCondUge, nullptr);
1372    }
1373    OpRegImm(kOpAdd, reg_ptr, data_offset);
1374  } else {
1375    if (range_check) {
1376      // On x86, we can compare to memory directly
1377      // Set up a slow path to allow retry in case of bounds violation.
1378      if (rl_idx.is_const) {
1379        range_check_branch = OpCmpMemImmBranch(
1380            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
1381            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
1382      } else {
1383        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
1384        range_check_branch = OpCondBranch(kCondUge, nullptr);
1385      }
1386    }
1387    reg_off = AllocTemp();
1388    reg_ptr = AllocTempRef();
1389    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1390    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1391  }
1392  if (rl_idx.is_const) {
1393    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1394  } else {
1395    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1396  }
1397  FreeTemp(rl_obj.reg);
1398  if (rl_idx.location == kLocPhysReg) {
1399    FreeTemp(rl_idx.reg);
1400  }
1401  RegLocation rl_dest = InlineTarget(info);
1402  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1403  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1404    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1405  } else {
1406    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
1407  }
1408  FreeTemp(reg_off);
1409  FreeTemp(reg_ptr);
1410  StoreValue(rl_dest, rl_result);
1411  if (range_check) {
1412    DCHECK(range_check_branch != nullptr);
1413    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1414    AddIntrinsicSlowPath(info, range_check_branch);
1415  }
1416  return true;
1417}
1418
1419// Generates an inlined String.isEmpty() or String.length().
1420bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1421  if (cu_->instruction_set == kMips) {
1422    // TODO - add Mips implementation
1423    return false;
1424  }
1425  // dst = src.length();
1426  RegLocation rl_obj = info->args[0];
1427  rl_obj = LoadValue(rl_obj, kRefReg);
1428  RegLocation rl_dest = InlineTarget(info);
1429  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1430  GenNullCheck(rl_obj.reg, info->opt_flags);
1431  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1432  MarkPossibleNullPointerException(info->opt_flags);
1433  if (is_empty) {
1434    // dst = (dst == 0);
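    // Branchless (dst == 0): the Thumb2 sequence relies on NEG setting the carry flag only
    // for a zero input, so ADC of dst and -dst yields exactly that flag.  Elsewhere,
    // (dst - 1) shifted right logically by 31 is 1 only when dst was 0, since the count is
    // always non-negative.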
1435    if (cu_->instruction_set == kThumb2) {
1436      RegStorage t_reg = AllocTemp();
1437      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1438      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1439    } else if (cu_->instruction_set == kArm64) {
1440      OpRegImm(kOpSub, rl_result.reg, 1);
1441      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1442    } else {
1443      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1444      OpRegImm(kOpSub, rl_result.reg, 1);
1445      OpRegImm(kOpLsr, rl_result.reg, 31);
1446    }
1447  }
1448  StoreValue(rl_dest, rl_result);
1449  return true;
1450}
1451
1452bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1453  if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
1454    // TODO - add Mips implementation; Enable Arm64.
1455    return false;
1456  }
1457  RegLocation rl_src_i = info->args[0];
1458  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1459  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1460  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1461  if (size == k64) {
1462    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1463      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1464      StoreValueWide(rl_dest, rl_result);
1465      return true;
1466    }
1467    RegStorage r_i_low = rl_i.reg.GetLow();
1468    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1469      // The first REV clobbers rl_result.reg.GetLow(), so save the value in a temp for the second REV.
1470      r_i_low = AllocTemp();
1471      OpRegCopy(r_i_low, rl_i.reg);
1472    }
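    // A 64-bit byte swap is the byte swap of each 32-bit half with the two halves exchanged,
    // hence the crossed REV operands below.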
1473    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1474    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1475    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1476      FreeTemp(r_i_low);
1477    }
1478    StoreValueWide(rl_dest, rl_result);
1479  } else {
1480    DCHECK(size == k32 || size == kSignedHalf);
1481    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1482    OpRegReg(op, rl_result.reg, rl_i.reg);
1483    StoreValue(rl_dest, rl_result);
1484  }
1485  return true;
1486}
1487
1488bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1489  if (cu_->instruction_set == kMips) {
1490    // TODO - add Mips implementation
1491    return false;
1492  }
1493  RegLocation rl_src = info->args[0];
1494  rl_src = LoadValue(rl_src, kCoreReg);
1495  RegLocation rl_dest = InlineTarget(info);
1496  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1497  RegStorage sign_reg = AllocTemp();
1498  // abs(x): y = x >> 31 (arithmetic shift); result = (x + y) ^ y.
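  // For negative x, y is all ones, so (x + y) ^ y == ~(x - 1) == -x; for non-negative x,
  // y is zero and x is unchanged.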
1499  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1500  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1501  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1502  StoreValue(rl_dest, rl_result);
1503  return true;
1504}
1505
1506bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1507  if (cu_->instruction_set == kMips) {
1508    // TODO - add Mips implementation
1509    return false;
1510  }
1511  RegLocation rl_src = info->args[0];
1512  rl_src = LoadValueWide(rl_src, kCoreReg);
1513  RegLocation rl_dest = InlineTargetWide(info);
1514  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1515
1516  // If on x86 or if we would clobber a register needed later, just copy the source first.
1517  if (cu_->instruction_set != kX86_64 &&
1518      (cu_->instruction_set == kX86 ||
1519       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1520    OpRegCopyWide(rl_result.reg, rl_src.reg);
1521    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1522        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1523        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1524        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1525      // Reuse source registers to avoid running out of temps.
1526      FreeTemp(rl_src.reg);
1527    }
1528    rl_src = rl_result;
1529  }
1530
1531  // abs(x): y = x >> 63 (arithmetic shift); result = (x + y) ^ y.
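  // On x86_64 this is done in a single wide register with a 63-bit shift; on 32-bit register
  // pairs the sign comes from the high word and the add/adc pair propagates the carry.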
1532  RegStorage sign_reg;
1533  if (cu_->instruction_set == kX86_64) {
1534    sign_reg = AllocTempWide();
1535    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1536    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1537    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1538  } else {
1539    sign_reg = AllocTemp();
1540    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1541    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1542    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1543    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1544    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1545  }
1546  FreeTemp(sign_reg);
1547  StoreValueWide(rl_dest, rl_result);
1548  return true;
1549}
1550
1551bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
1552  if (cu_->instruction_set == kMips) {
1553    // TODO - add Mips implementation
1554    return false;
1555  }
1556  RegLocation rl_src = info->args[0];
1557  rl_src = LoadValue(rl_src, kCoreReg);
1558  RegLocation rl_dest = InlineTarget(info);
1559  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
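  // Math.abs(float) on the raw bits: clearing the IEEE-754 sign bit (bit 31) is sufficient.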
1560  OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
1561  StoreValue(rl_dest, rl_result);
1562  return true;
1563}
1564
1565bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1566  // Currently implemented only for ARM64
1567  return false;
1568}
1569
1570bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1571  // Currently implemented only for ARM64
1572  return false;
1573}
1574
1575bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
1576  if (cu_->instruction_set == kMips) {
1577    // TODO - add Mips implementation
1578    return false;
1579  }
1580  RegLocation rl_src = info->args[0];
1581  rl_src = LoadValueWide(rl_src, kCoreReg);
1582  RegLocation rl_dest = InlineTargetWide(info);
1583  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1584
1585  OpRegCopyWide(rl_result.reg, rl_src.reg);
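  // The sign of a double is bit 63, i.e. bit 31 of the high 32-bit word, so copy the value
  // and mask only the high half.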
1586  OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
1587  StoreValueWide(rl_dest, rl_result);
1588  return true;
1589}
1590
1591bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1592  if (cu_->instruction_set == kMips) {
1593    // TODO - add Mips implementation
1594    return false;
1595  }
1596  RegLocation rl_src = info->args[0];
1597  RegLocation rl_dest = InlineTarget(info);
1598  StoreValue(rl_dest, rl_src);
1599  return true;
1600}
1601
1602bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1603  if (cu_->instruction_set == kMips) {
1604    // TODO - add Mips implementation
1605    return false;
1606  }
1607  RegLocation rl_src = info->args[0];
1608  RegLocation rl_dest = InlineTargetWide(info);
1609  StoreValueWide(rl_dest, rl_src);
1610  return true;
1611}
1612
1613bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1614  return false;
1615}
1616
1617
1618/*
1619 * Fast String.indexOf(I) & (II).  Tests for simple case of char <= 0xFFFF,
1620 * otherwise bails to standard library code.
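 * (Code points above 0xFFFF would require matching a surrogate pair, which the fast path
 * does not attempt.)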
1621 */
1622bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1623  if (cu_->instruction_set == kMips) {
1624    // TODO - add Mips implementation
1625    return false;
1626  }
1627  if (cu_->instruction_set == kX86_64) {
1628    // TODO - add kX86_64 implementation
1629    return false;
1630  }
1631  RegLocation rl_obj = info->args[0];
1632  RegLocation rl_char = info->args[1];
1633  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1634    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1635    return false;
1636  }
1637
1638  ClobberCallerSave();
1639  LockCallTemps();  // Using fixed registers
1640  RegStorage reg_ptr = TargetRefReg(kArg0);
1641  RegStorage reg_char = TargetReg(kArg1, false);
1642  RegStorage reg_start = TargetReg(kArg2, false);
1643
1644  LoadValueDirectFixed(rl_obj, reg_ptr);
1645  LoadValueDirectFixed(rl_char, reg_char);
1646  if (zero_based) {
1647    LoadConstant(reg_start, 0);
1648  } else {
1649    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1650    LoadValueDirectFixed(rl_start, reg_start);
1651  }
1652  RegStorage r_tgt = cu_->target64 ?
1653      LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
1654      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
1655  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1656  LIR* high_code_point_branch =
1657      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1658  // NOTE: not a safepoint
1659  OpReg(kOpBlx, r_tgt);
1660  if (!rl_char.is_const) {
1661    // Add the slow path for code points beyond 0xFFFF.
1662    DCHECK(high_code_point_branch != nullptr);
1663    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1664    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1665    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1666  } else {
1667    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1668    DCHECK(high_code_point_branch == nullptr);
1669  }
1670  RegLocation rl_return = GetReturn(kCoreReg);
1671  RegLocation rl_dest = InlineTarget(info);
1672  StoreValue(rl_dest, rl_return);
1673  return true;
1674}
1675
1676/* Fast String.compareTo(Ljava/lang/String;)I. */
1677bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1678  if (cu_->instruction_set == kMips) {
1679    // TODO - add Mips implementation
1680    return false;
1681  }
1682  ClobberCallerSave();
1683  LockCallTemps();  // Using fixed registers
1684  RegStorage reg_this = TargetRefReg(kArg0);
1685  RegStorage reg_cmp = TargetRefReg(kArg1);
1686
1687  RegLocation rl_this = info->args[0];
1688  RegLocation rl_cmp = info->args[1];
1689  LoadValueDirectFixed(rl_this, reg_this);
1690  LoadValueDirectFixed(rl_cmp, reg_cmp);
1691  RegStorage r_tgt;
1692  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1693    if (cu_->target64) {
1694      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1695    } else {
1696      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1697    }
1698  } else {
1699    r_tgt = RegStorage::InvalidReg();
1700  }
1701  GenExplicitNullCheck(reg_this, info->opt_flags);
1702  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1703  // TUNING: check if rl_cmp.s_reg_low is already null checked
1704  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1705  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1706  // NOTE: not a safepoint
1707  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1708    OpReg(kOpBlx, r_tgt);
1709  } else {
1710    if (cu_->target64) {
1711      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1712    } else {
1713      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1714    }
1715  }
1716  RegLocation rl_return = GetReturn(kCoreReg);
1717  RegLocation rl_dest = InlineTarget(info);
1718  StoreValue(rl_dest, rl_return);
1719  return true;
1720}
1721
1722bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1723  RegLocation rl_dest = InlineTarget(info);
1724
1725  // Early exit if the result is unused.
1726  if (rl_dest.orig_sreg < 0) {
1727    return true;
1728  }
1729
1730  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1731
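  // Thread.currentThread() reduces to reading the managed peer pointer (Thread::PeerOffset)
  // out of the native Thread: via the dedicated self register where one exists, or a
  // thread-memory operand on x86/x86-64.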
1732  switch (cu_->instruction_set) {
1733    case kArm:
1734      // Fall-through.
1735    case kThumb2:
1736      // Fall-through.
1737    case kMips:
1738      Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1739      break;
1740
1741    case kArm64:
1742      LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1743                  kNotVolatile);
1744      break;
1745
1746    case kX86:
1747      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1748                                                          Thread::PeerOffset<4>());
1749      break;
1750
1751    case kX86_64:
1752      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1753                                                          Thread::PeerOffset<8>());
1754      break;
1755
1756    default:
1757      LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
1758  }
1759  StoreValue(rl_dest, rl_result);
1760  return true;
1761}
1762
1763bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1764                                  bool is_long, bool is_volatile) {
1765  if (cu_->instruction_set == kMips) {
1766    // TODO - add Mips implementation
1767    return false;
1768  }
1769  // Unused - RegLocation rl_src_unsafe = info->args[0];
1770  RegLocation rl_src_obj = info->args[1];  // Object
1771  RegLocation rl_src_offset = info->args[2];  // long low
1772  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1773  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1774
1775  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1776  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1777  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
1778  if (is_long) {
1779    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1780        || cu_->instruction_set == kArm64) {
1781      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1782    } else {
1783      RegStorage rl_temp_offset = AllocTemp();
1784      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1785      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1786      FreeTemp(rl_temp_offset);
1787    }
1788  } else {
1789    if (rl_result.ref) {
1790      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1791    } else {
1792      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1793    }
1794  }
1795
1796  if (is_volatile) {
1797    // Without context-sensitive analysis, we must issue the most conservative barriers.
1798    // In this case, either a load or a store may follow, so we issue both barriers.
1799    GenMemBarrier(kLoadLoad);
1800    GenMemBarrier(kLoadStore);
1801  }
1802
1803  if (is_long) {
1804    StoreValueWide(rl_dest, rl_result);
1805  } else {
1806    StoreValue(rl_dest, rl_result);
1807  }
1808  return true;
1809}
1810
1811bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1812                                  bool is_object, bool is_volatile, bool is_ordered) {
1813  if (cu_->instruction_set == kMips) {
1814    // TODO - add Mips implementation
1815    return false;
1816  }
1817  // Unused - RegLocation rl_src_unsafe = info->args[0];
1818  RegLocation rl_src_obj = info->args[1];  // Object
1819  RegLocation rl_src_offset = info->args[2];  // long low
1820  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1821  RegLocation rl_src_value = info->args[4];  // value to store
1822  if (is_volatile || is_ordered) {
1823    // There might have been a store before this volatile one, so insert a StoreStore barrier.
1824    GenMemBarrier(kStoreStore);
1825  }
1826  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1827  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1828  RegLocation rl_value;
1829  if (is_long) {
1830    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1831    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1832        || cu_->instruction_set == kArm64) {
1833      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1834    } else {
1835      RegStorage rl_temp_offset = AllocTemp();
1836      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1837      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1838      FreeTemp(rl_temp_offset);
1839    }
1840  } else {
1841    rl_value = LoadValue(rl_src_value);
1842    if (rl_value.ref) {
1843      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1844    } else {
1845      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1846    }
1847  }
1848
1849  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1850  FreeTemp(rl_offset.reg);
1851
1852  if (is_volatile) {
1853    // A load might follow the volatile store so insert a StoreLoad barrier.
1854    GenMemBarrier(kStoreLoad);
1855  }
1856  if (is_object) {
1857    MarkGCCard(rl_value.reg, rl_object.reg);
1858  }
1859  return true;
1860}
1861
1862void Mir2Lir::GenInvoke(CallInfo* info) {
1863  if ((info->opt_flags & MIR_INLINED) != 0) {
1864    // Already inlined but we may still need the null check.
1865    if (info->type != kStatic &&
1866        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
1867         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0))  {
1868      RegLocation rl_obj = LoadValue(info->args[0], kRefReg);
1869      GenNullCheck(rl_obj.reg);
1870    }
1871    return;
1872  }
1873  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1874  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1875      ->GenIntrinsic(this, info)) {
1876    return;
1877  }
1878  GenInvokeNoInline(info);
1879}
1880
1881template <size_t pointer_size>
1882static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
1883  ThreadOffset<pointer_size> trampoline(-1);
1884  switch (type) {
1885    case kInterface:
1886      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeInterfaceTrampolineWithAccessCheck);
1887      break;
1888    case kDirect:
1889      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeDirectTrampolineWithAccessCheck);
1890      break;
1891    case kStatic:
1892      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeStaticTrampolineWithAccessCheck);
1893      break;
1894    case kSuper:
1895      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeSuperTrampolineWithAccessCheck);
1896      break;
1897    case kVirtual:
1898      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeVirtualTrampolineWithAccessCheck);
1899      break;
1900    default:
1901      LOG(FATAL) << "Unexpected invoke type";
1902  }
1903  return mir_to_lir->OpThreadMem(kOpBlx, trampoline);
1904}
1905
1906void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1907  int call_state = 0;
1908  LIR* null_ck;
1909  LIR** p_null_ck = nullptr;
1910  NextCallInsn next_call_insn;
1911  FlushAllRegs();  /* Everything to home location */
1912  // Explicit register usage
1913  LockCallTemps();
1914
1915  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1916  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1917  BeginInvoke(info);
1918  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1919  info->type = static_cast<InvokeType>(method_info.GetSharpType());
1920  bool fast_path = method_info.FastPath();
1921  bool skip_this;
1922  if (info->type == kInterface) {
1923    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1924    skip_this = fast_path;
1925  } else if (info->type == kDirect) {
1926    if (fast_path) {
1927      p_null_ck = &null_ck;
1928    }
1929    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1930    skip_this = false;
1931  } else if (info->type == kStatic) {
1932    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1933    skip_this = false;
1934  } else if (info->type == kSuper) {
1935    DCHECK(!fast_path);  // Fast path is a direct call.
1936    next_call_insn = NextSuperCallInsnSP;
1937    skip_this = false;
1938  } else {
1939    DCHECK_EQ(info->type, kVirtual);
1940    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1941    skip_this = fast_path;
1942  }
1943  MethodReference target_method = method_info.GetTargetMethod();
1944  if (!info->is_range) {
1945    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1946                                      next_call_insn, target_method, method_info.VTableIndex(),
1947                                      method_info.DirectCode(), method_info.DirectMethod(),
1948                                      original_type, skip_this);
1949  } else {
1950    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1951                                    next_call_insn, target_method, method_info.VTableIndex(),
1952                                    method_info.DirectCode(), method_info.DirectMethod(),
1953                                    original_type, skip_this);
1954  }
1955  // Finish up any part of the call sequence not interleaved with arg loading.
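  // Each next_call_insn step returns the index of the next state; a negative value signals
  // that the full call sequence has been emitted.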
1956  while (call_state >= 0) {
1957    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1958                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
1959  }
1960  LIR* call_inst;
1961  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1962    call_inst = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
1963  } else {
1964    if (fast_path) {
1965      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
1966        // We can have the linker fixup a call relative.
1967        call_inst =
1968          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
1969      } else {
1970        call_inst = OpMem(kOpBlx, TargetRefReg(kArg0),
1971                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
1972      }
1973    } else {
1974      // TODO: Extract?
1975      if (cu_->target64) {
1976        call_inst = GenInvokeNoInlineCall<8>(this, info->type);
1977      } else {
1978        call_inst = GenInvokeNoInlineCall<4>(this, info->type);
1979      }
1980    }
1981  }
1982  EndInvoke(info);
1983  MarkSafepointPC(call_inst);
1984
1985  ClobberCallerSave();
1986  if (info->result.location != kLocInvalid) {
1987    // We have a following MOVE_RESULT - do it now.
1988    if (info->result.wide) {
1989      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1990      StoreValueWide(info->result, ret_loc);
1991    } else {
1992      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1993      StoreValue(info->result, ret_loc);
1994    }
1995  }
1996}
1997
1998}  // namespace art
1999