gen_invoke.cc revision 00e1ec6581b5b7b46ca4c314c2854e9caa647dd2
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/string.h"
26#include "mir_to_lir-inl.h"
27#include "x86/codegen_x86.h"
28
29namespace art {
30
31/*
32 * This source file contains "gen" codegen routines that should
33 * be applicable to most targets.  Only mid-level support utilities
34 * and "op" calls may be used here.
35 */
36
37/*
38 * To save scheduling time, helper calls are broken into two parts: generation of
39 * the helper target address, and the actual call to the helper.  Because x86
40 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
41 * load arguments between the two parts.
42 */
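/*
 * Typical sequence, as used by the CallRuntimeHelper* wrappers below:
 *   int r_tgt = CallHelperSetup(helper_offset);   // no-op (returns 0) on x86
 *   ...marshal arguments into the target argument registers...
 *   ClobberCallerSave();
 *   CallHelper(r_tgt, helper_offset, safepoint_pc);
 */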
43int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
44  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
45}
46
47/* NOTE: if r_tgt is a temp, it will be freed following use */
48LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
49  LIR* call_inst;
50  if (cu_->instruction_set == kX86) {
51    call_inst = OpThreadMem(kOpBlx, helper_offset);
52  } else {
53    call_inst = OpReg(kOpBlx, r_tgt);
54    FreeTemp(r_tgt);
55  }
56  if (safepoint_pc) {
57    MarkSafepointPC(call_inst);
58  }
59  return call_inst;
60}
61
62void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
63  int r_tgt = CallHelperSetup(helper_offset);
64  LoadConstant(TargetReg(kArg0), arg0);
65  ClobberCallerSave();
66  CallHelper(r_tgt, helper_offset, safepoint_pc);
67}
68
69void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
70  int r_tgt = CallHelperSetup(helper_offset);
71  OpRegCopy(TargetReg(kArg0), arg0);
72  ClobberCallerSave();
73  CallHelper(r_tgt, helper_offset, safepoint_pc);
74}
75
76void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
77                                           bool safepoint_pc) {
78  int r_tgt = CallHelperSetup(helper_offset);
79  if (arg0.wide == 0) {
80    LoadValueDirectFixed(arg0, TargetReg(kArg0));
81  } else {
82    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
83  }
84  ClobberCallerSave();
85  CallHelper(r_tgt, helper_offset, safepoint_pc);
86}
87
88void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
89                                      bool safepoint_pc) {
90  int r_tgt = CallHelperSetup(helper_offset);
91  LoadConstant(TargetReg(kArg0), arg0);
92  LoadConstant(TargetReg(kArg1), arg1);
93  ClobberCallerSave();
94  CallHelper(r_tgt, helper_offset, safepoint_pc);
95}
96
97void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
98                                              RegLocation arg1, bool safepoint_pc) {
99  int r_tgt = CallHelperSetup(helper_offset);
100  if (arg1.wide == 0) {
101    LoadValueDirectFixed(arg1, TargetReg(kArg1));
102  } else {
103    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
104  }
105  LoadConstant(TargetReg(kArg0), arg0);
106  ClobberCallerSave();
107  CallHelper(r_tgt, helper_offset, safepoint_pc);
108}
109
110void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
111                                              bool safepoint_pc) {
112  int r_tgt = CallHelperSetup(helper_offset);
113  LoadValueDirectFixed(arg0, TargetReg(kArg0));
114  LoadConstant(TargetReg(kArg1), arg1);
115  ClobberCallerSave();
116  CallHelper(r_tgt, helper_offset, safepoint_pc);
117}
118
119void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
120                                      bool safepoint_pc) {
121  int r_tgt = CallHelperSetup(helper_offset);
122  OpRegCopy(TargetReg(kArg1), arg1);
123  LoadConstant(TargetReg(kArg0), arg0);
124  ClobberCallerSave();
125  CallHelper(r_tgt, helper_offset, safepoint_pc);
126}
127
128void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
129                                      bool safepoint_pc) {
130  int r_tgt = CallHelperSetup(helper_offset);
131  OpRegCopy(TargetReg(kArg0), arg0);
132  LoadConstant(TargetReg(kArg1), arg1);
133  ClobberCallerSave();
134  CallHelper(r_tgt, helper_offset, safepoint_pc);
135}
136
137void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
138  int r_tgt = CallHelperSetup(helper_offset);
139  LoadCurrMethodDirect(TargetReg(kArg1));
140  LoadConstant(TargetReg(kArg0), arg0);
141  ClobberCallerSave();
142  CallHelper(r_tgt, helper_offset, safepoint_pc);
143}
144
145void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
146  int r_tgt = CallHelperSetup(helper_offset);
147  DCHECK_NE(TargetReg(kArg1), arg0);
148  if (TargetReg(kArg0) != arg0) {
149    OpRegCopy(TargetReg(kArg0), arg0);
150  }
151  LoadCurrMethodDirect(TargetReg(kArg1));
152  ClobberCallerSave();
153  CallHelper(r_tgt, helper_offset, safepoint_pc);
154}
155
156void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
157                                                    RegLocation arg2, bool safepoint_pc) {
158  int r_tgt = CallHelperSetup(helper_offset);
159  DCHECK_NE(TargetReg(kArg1), arg0);
160  if (TargetReg(kArg0) != arg0) {
161    OpRegCopy(TargetReg(kArg0), arg0);
162  }
163  LoadCurrMethodDirect(TargetReg(kArg1));
164  LoadValueDirectFixed(arg2, TargetReg(kArg2));
165  ClobberCallerSave();
166  CallHelper(r_tgt, helper_offset, safepoint_pc);
167}
168
169void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
170                                                      RegLocation arg1, bool safepoint_pc) {
171  int r_tgt = CallHelperSetup(helper_offset);
172  if (arg0.wide == 0) {
173    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
174    if (arg1.wide == 0) {
175      if (cu_->instruction_set == kMips) {
176        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
177      } else {
178        LoadValueDirectFixed(arg1, TargetReg(kArg1));
179      }
180    } else {
181      if (cu_->instruction_set == kMips) {
182        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
183      } else {
184        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
185      }
186    }
187  } else {
188    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
189    if (arg1.wide == 0) {
190      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
191    } else {
192      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
193    }
194  }
195  ClobberCallerSave();
196  CallHelper(r_tgt, helper_offset, safepoint_pc);
197}
198
199void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
200                                      bool safepoint_pc) {
201  int r_tgt = CallHelperSetup(helper_offset);
202  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
203  OpRegCopy(TargetReg(kArg0), arg0);
204  OpRegCopy(TargetReg(kArg1), arg1);
205  ClobberCallerSave();
206  CallHelper(r_tgt, helper_offset, safepoint_pc);
207}
208
209void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
210                                         int arg2, bool safepoint_pc) {
211  int r_tgt = CallHelperSetup(helper_offset);
212  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
213  OpRegCopy(TargetReg(kArg0), arg0);
214  OpRegCopy(TargetReg(kArg1), arg1);
215  LoadConstant(TargetReg(kArg2), arg2);
216  ClobberCallerSave();
217  CallHelper(r_tgt, helper_offset, safepoint_pc);
218}
219
220void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
221                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
222  int r_tgt = CallHelperSetup(helper_offset);
223  LoadValueDirectFixed(arg2, TargetReg(kArg2));
224  LoadCurrMethodDirect(TargetReg(kArg1));
225  LoadConstant(TargetReg(kArg0), arg0);
226  ClobberCallerSave();
227  CallHelper(r_tgt, helper_offset, safepoint_pc);
228}
229
230void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
231                                            int arg2, bool safepoint_pc) {
232  int r_tgt = CallHelperSetup(helper_offset);
233  LoadCurrMethodDirect(TargetReg(kArg1));
234  LoadConstant(TargetReg(kArg2), arg2);
235  LoadConstant(TargetReg(kArg0), arg0);
236  ClobberCallerSave();
237  CallHelper(r_tgt, helper_offset, safepoint_pc);
238}
239
240void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
241                                                         int arg0, RegLocation arg1,
242                                                         RegLocation arg2, bool safepoint_pc) {
243  int r_tgt = CallHelperSetup(helper_offset);
244  DCHECK_EQ(arg1.wide, 0U);
245  LoadValueDirectFixed(arg1, TargetReg(kArg1));
246  if (arg2.wide == 0) {
247    LoadValueDirectFixed(arg2, TargetReg(kArg2));
248  } else {
249    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
250  }
251  LoadConstant(TargetReg(kArg0), arg0);
252  ClobberCallerSave();
253  CallHelper(r_tgt, helper_offset, safepoint_pc);
254}
255
256void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset helper_offset,
257                                                                 RegLocation arg0, RegLocation arg1,
258                                                                 RegLocation arg2,
259                                                                 bool safepoint_pc) {
260  int r_tgt = CallHelperSetup(helper_offset);
261  DCHECK_EQ(arg0.wide, 0U);
262  LoadValueDirectFixed(arg0, TargetReg(kArg0));
263  DCHECK_EQ(arg1.wide, 0U);
264  LoadValueDirectFixed(arg1, TargetReg(kArg1));
265  DCHECK_EQ(arg2.wide, 0U);
266  LoadValueDirectFixed(arg2, TargetReg(kArg2));
267  ClobberCallerSave();
268  CallHelper(r_tgt, helper_offset, safepoint_pc);
269}
270
271/*
272 * If there are any ins passed in registers that have not been promoted
273 * to a callee-save register, flush them to the frame.  Perform initial
274 * assignment of promoted arguments.
275 *
276 * ArgLocs is an array of location records describing the incoming arguments
277 * with one location record per word of argument.
278 */
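/*
 * For example, an instance method taking (long, int) has four ins words:
 * ArgLocs[0] for "this", ArgLocs[1]/ArgLocs[2] for the low and high words of
 * the long, and ArgLocs[3] for the int (illustrative layout only).
 */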
279void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
280  /*
281   * Dummy up a RegLocation for the incoming Method*
282   * It will attempt to keep kArg0 live (or copy it to home location
283   * if promoted).
284   */
285  RegLocation rl_src = rl_method;
286  rl_src.location = kLocPhysReg;
287  rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
288  rl_src.home = false;
289  MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
290  StoreValue(rl_method, rl_src);
291  // If Method* has been promoted, explicitly flush
292  if (rl_method.location == kLocPhysReg) {
293    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
294  }
295
296  if (cu_->num_ins == 0) {
297    return;
298  }
299
300  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
301  /*
302   * Copy incoming arguments to their proper home locations.
303   * NOTE: an older version of dx had an issue in which
304   * it would reuse static method argument registers.
305   * This could result in the same Dalvik virtual register
306   * being promoted to both core and fp regs. To account for this,
307   * we only copy to the corresponding promoted physical register
308   * if it matches the type of the SSA name for the incoming
309   * argument.  It is also possible that long and double arguments
310   * end up half-promoted.  In those cases, we must flush the promoted
311   * half to memory as well.
312   */
313  for (int i = 0; i < cu_->num_ins; i++) {
314    PromotionMap* v_map = &promotion_map_[start_vreg + i];
315    int reg = GetArgMappingToPhysicalReg(i);
316
317    if (reg != INVALID_REG) {
318      // If arriving in register
319      bool need_flush = true;
320      RegLocation* t_loc = &ArgLocs[i];
321      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
322        OpRegCopy(v_map->core_reg, reg);
323        need_flush = false;
324      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
325        OpRegCopy(v_map->FpReg, reg);
326        need_flush = false;
327      } else {
328        need_flush = true;
329      }
330
331      // For wide args, force flush if not fully promoted
332      if (t_loc->wide) {
333        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
334        // Is it only half promoted?
335        need_flush |= (p_map->core_location != v_map->core_location) ||
336            (p_map->fp_location != v_map->fp_location);
337        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
338          /*
339           * In Arm, a double is represented as a pair of consecutive single float
340           * registers starting at an even number.  It's possible that both Dalvik vRegs
341           * representing the incoming double were independently promoted as singles - but
342           * not in a form usable as a double.  If so, we need to flush - even though the
343           * incoming arg appears fully in register.  At this point in the code, both
344           * halves of the double are promoted.  Make sure they are in a usable form.
345           */
346          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
347          int low_reg = promotion_map_[lowreg_index].FpReg;
348          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
349          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
350            need_flush = true;
351          }
352        }
353      }
354      if (need_flush) {
355        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord);
356      }
357    } else {
358      // If arriving in frame & promoted
359      if (v_map->core_location == kLocPhysReg) {
360        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
361                     v_map->core_reg);
362      }
363      if (v_map->fp_location == kLocPhysReg) {
364        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
365                     v_map->FpReg);
366      }
367    }
368  }
369}
370
371/*
372 * Bit of a hack here - in the absence of a real scheduling pass,
373 * emit the next instruction in static & direct invoke sequences.
374 */
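/*
 * Contract shared by the Next*CallInsn helpers: the caller invokes them
 * repeatedly, interleaved with argument setup, passing the previously
 * returned value as 'state'.  Each call emits the next step of the invoke
 * sequence and returns state + 1, or -1 once the sequence is complete.
 */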
375static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
376                          int state, const MethodReference& target_method,
377                          uint32_t unused,
378                          uintptr_t direct_code, uintptr_t direct_method,
379                          InvokeType type) {
380  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
381  if (direct_code != 0 && direct_method != 0) {
382    switch (state) {
383    case 0:  // Get the current Method* [sets kArg0]
384      if (direct_code != static_cast<unsigned int>(-1)) {
385        if (cu->instruction_set != kX86) {
386          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
387        }
388      } else if (cu->instruction_set != kX86) {
389        CHECK_EQ(cu->dex_file, target_method.dex_file);
390        cg->LoadCodeAddress(target_method.dex_method_index, type, kInvokeTgt);
391      }
392      if (direct_method != static_cast<unsigned int>(-1)) {
393        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
394      } else {
395        CHECK_EQ(cu->dex_file, target_method.dex_file);
396        cg->LoadMethodAddress(target_method.dex_method_index, type, kArg0);
397      }
398      break;
399    default:
400      return -1;
401    }
402  } else {
403    switch (state) {
404    case 0:  // Get the current Method* [sets kArg0]
405      // TUNING: we can save a reg copy if Method* has been promoted.
406      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
407      break;
408    case 1:  // Get method->dex_cache_resolved_methods_
409      cg->LoadWordDisp(cg->TargetReg(kArg0),
410        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
411      // Set up direct code if known.
412      if (direct_code != 0) {
413        if (direct_code != static_cast<unsigned int>(-1)) {
414          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
415        } else if (cu->instruction_set != kX86) {
416          CHECK_EQ(cu->dex_file, target_method.dex_file);
417          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
418          cg->LoadCodeAddress(target_method.dex_method_index, type, kInvokeTgt);
419        }
420      }
421      break;
422    case 2:  // Grab target method*
423      CHECK_EQ(cu->dex_file, target_method.dex_file);
424      cg->LoadWordDisp(cg->TargetReg(kArg0),
425                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
426                           (target_method.dex_method_index * 4),
427                       cg->TargetReg(kArg0));
428      break;
429    case 3:  // Grab the code from the method*
430      if (cu->instruction_set != kX86) {
431        if (direct_code == 0) {
432          cg->LoadWordDisp(cg->TargetReg(kArg0),
433                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
434                           cg->TargetReg(kInvokeTgt));
435        }
436        break;
437      }
438      // Intentional fallthrough for x86
439    default:
440      return -1;
441    }
442  }
443  return state + 1;
444}
445
446/*
447 * Bit of a hack here - in the absence of a real scheduling pass,
448 * emit the next instruction in a virtual invoke sequence.
449 * We can use kLr as a temp prior to target address loading
450 * Note also that we'll load the first argument ("this") into
451 * kArg1 here rather than the standard LoadArgRegs.
452 */
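/*
 * The vtable is read as an object array: case 3 below forms the slot address
 * as Array::DataOffset(sizeof(Object*)) + method_idx * 4, i.e. one 4-byte
 * reference per entry on these 32-bit targets.
 */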
453static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
454                         int state, const MethodReference& target_method,
455                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
456                         InvokeType unused3) {
457  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
458  /*
459   * This is the fast path in which the target virtual method is
460   * fully resolved at compile time.
461   */
462  switch (state) {
463    case 0: {  // Get "this" [set kArg1]
464      RegLocation  rl_arg = info->args[0];
465      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
466      break;
467    }
468    case 1:  // Is "this" null? [use kArg1]
469      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
470      // get this->klass_ [use kArg1, set kInvokeTgt]
471      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
472                       cg->TargetReg(kInvokeTgt));
473      break;
474    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
475      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
476                       cg->TargetReg(kInvokeTgt));
477      break;
478    case 3:  // Get target method [use kInvokeTgt, set kArg0]
479      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
480                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
481                       cg->TargetReg(kArg0));
482      break;
483    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
484      if (cu->instruction_set != kX86) {
485        cg->LoadWordDisp(cg->TargetReg(kArg0),
486                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
487                         cg->TargetReg(kInvokeTgt));
488        break;
489      }
490      // Intentional fallthrough for X86
491    default:
492      return -1;
493  }
494  return state + 1;
495}
496
497/*
498 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
499 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
500 * more than one interface method maps to the same index. Note also that we'll load the first
501 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
502 */
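/*
 * The IMT has a fixed number of slots, so case 4 below indexes it with
 * method_idx % ClassLinker::kImtSize.  kHiddenArg carries the dex method
 * index so the conflict trampoline can pick the right method when several
 * interface methods hash to the same slot.
 */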
503static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
504                                 const MethodReference& target_method,
505                                 uint32_t method_idx, uintptr_t unused,
506                                 uintptr_t direct_method, InvokeType unused2) {
507  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
508
509  switch (state) {
510    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
511      CHECK_EQ(cu->dex_file, target_method.dex_file);
512      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
513      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
514      if (cu->instruction_set == kX86) {
515        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
516      }
517      break;
518    case 1: {  // Get "this" [set kArg1]
519      RegLocation  rl_arg = info->args[0];
520      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
521      break;
522    }
523    case 2:  // Is "this" null? [use kArg1]
524      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
525      // Get this->klass_ [use kArg1, set kInvokeTgt]
526      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
527                       cg->TargetReg(kInvokeTgt));
528      break;
529    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
530      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
531                       cg->TargetReg(kInvokeTgt));
532      break;
533    case 4:  // Get target method [use kInvokeTgt, set kArg0]
534      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
535                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
536                       cg->TargetReg(kArg0));
537      break;
538    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
539      if (cu->instruction_set != kX86) {
540        cg->LoadWordDisp(cg->TargetReg(kArg0),
541                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
542                         cg->TargetReg(kInvokeTgt));
543        break;
544      }
545      // Intentional fallthrough for X86
546    default:
547      return -1;
548  }
549  return state + 1;
550}
551
552static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
553                            int state, const MethodReference& target_method,
554                            uint32_t method_idx) {
555  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
556  /*
557   * This handles the case in which the base method is not fully
558   * resolved at compile time; in that case we bail to a runtime helper.
559   */
560  if (state == 0) {
561    if (cu->instruction_set != kX86) {
562      // Load trampoline target
563      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
564    }
565    // Load kArg0 with method index
566    CHECK_EQ(cu->dex_file, target_method.dex_file);
567    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
568    return 1;
569  }
570  return -1;
571}
572
573static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
574                                int state,
575                                const MethodReference& target_method,
576                                uint32_t method_idx,
577                                uintptr_t unused, uintptr_t unused2,
578                                InvokeType unused3) {
579  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
580  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
581}
582
583static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
584                                const MethodReference& target_method,
585                                uint32_t method_idx, uintptr_t unused,
586                                uintptr_t unused2, InvokeType unused3) {
587  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
588  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
589}
590
591static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
592                               const MethodReference& target_method,
593                               uint32_t method_idx, uintptr_t unused,
594                               uintptr_t unused2, InvokeType unused3) {
595  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
596  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
597}
598
599static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
600                           const MethodReference& target_method,
601                           uint32_t method_idx, uintptr_t unused,
602                           uintptr_t unused2, InvokeType unused3) {
603  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
604  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
605}
606
607static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
608                                                CallInfo* info, int state,
609                                                const MethodReference& target_method,
610                                                uint32_t unused,
611                                                uintptr_t unused2, uintptr_t unused3,
612                                                InvokeType unused4) {
613  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
614  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
615}
616
617int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
618                         NextCallInsn next_call_insn,
619                         const MethodReference& target_method,
620                         uint32_t vtable_idx, uintptr_t direct_code,
621                         uintptr_t direct_method, InvokeType type, bool skip_this) {
622  int last_arg_reg = TargetReg(kArg3);
623  int next_reg = TargetReg(kArg1);
624  int next_arg = 0;
625  if (skip_this) {
626    next_reg++;
627    next_arg++;
628  }
629  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
630    RegLocation rl_arg = info->args[next_arg++];
631    rl_arg = UpdateRawLoc(rl_arg);
632    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
633      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
634      next_reg++;
635      next_arg++;
636    } else {
637      if (rl_arg.wide) {
638        rl_arg.wide = false;
639        rl_arg.is_const = false;
640      }
641      LoadValueDirectFixed(rl_arg, next_reg);
642    }
643    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
644                                direct_code, direct_method, type);
645  }
646  return call_state;
647}
648
649/*
650 * Load up to 5 arguments, the first three of which will be in
651 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
652 * and as part of the load sequence, it must be replaced with
653 * the target method pointer.  Note, this may also be called
654 * for "range" variants if the number of arguments is 5 or fewer.
655 */
656int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
657                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
658                                  const MethodReference& target_method,
659                                  uint32_t vtable_idx, uintptr_t direct_code,
660                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
661  RegLocation rl_arg;
662
663  /* If no arguments, just return */
664  if (info->num_arg_words == 0)
665    return call_state;
666
667  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
668                              direct_code, direct_method, type);
669
670  DCHECK_LE(info->num_arg_words, 5);
671  if (info->num_arg_words > 3) {
672    int32_t next_use = 3;
673    // Detect special case of wide arg spanning arg3/arg4
674    RegLocation rl_use0 = info->args[0];
675    RegLocation rl_use1 = info->args[1];
676    RegLocation rl_use2 = info->args[2];
677    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
678      rl_use2.wide) {
679      int reg = -1;
680      // Wide spans, we need the 2nd half of uses[2].
681      rl_arg = UpdateLocWide(rl_use2);
682      if (rl_arg.location == kLocPhysReg) {
683        reg = rl_arg.reg.GetHighReg();
684      } else {
685        // kArg2 & kArg3 can safely be used here
686        reg = TargetReg(kArg3);
687        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
688        call_state = next_call_insn(cu_, info, call_state, target_method,
689                                    vtable_idx, direct_code, direct_method, type);
690      }
691      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
692      StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
693      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
694                                  direct_code, direct_method, type);
695      next_use++;
696    }
697    // Loop through the rest
698    while (next_use < info->num_arg_words) {
699      int low_reg;
700      int high_reg = -1;
701      rl_arg = info->args[next_use];
702      rl_arg = UpdateRawLoc(rl_arg);
703      if (rl_arg.location == kLocPhysReg) {
704        low_reg = rl_arg.reg.GetReg();
705        if (rl_arg.wide) {
706          high_reg = rl_arg.reg.GetHighReg();
707        }
708      } else {
709        low_reg = TargetReg(kArg2);
710        if (rl_arg.wide) {
711          high_reg = TargetReg(kArg3);
712          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
713        } else {
714          LoadValueDirectFixed(rl_arg, low_reg);
715        }
716        call_state = next_call_insn(cu_, info, call_state, target_method,
717                                    vtable_idx, direct_code, direct_method, type);
718      }
719      int outs_offset = (next_use + 1) * 4;
720      if (rl_arg.wide) {
721        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
722        next_use += 2;
723      } else {
724        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
725        next_use++;
726      }
727      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
728                               direct_code, direct_method, type);
729    }
730  }
731
732  call_state = LoadArgRegs(info, call_state, next_call_insn,
733                           target_method, vtable_idx, direct_code, direct_method,
734                           type, skip_this);
735
736  if (pcrLabel) {
737    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
738  }
739  return call_state;
740}
741
742/*
743 * May have 0+ arguments (also used for jumbo).  Note that
744 * source virtual registers may be in physical registers, so may
745 * need to be flushed to home location before copying.  This
746 * applies to arg3 and above (see below).
747 *
748 * Two general strategies:
749 *    If < 20 arguments
750 *       Pass args 3-18 using vldm/vstm block copy
751 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
752 *    If 20+ arguments
753 *       Pass args arg19+ using memcpy block copy
754 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
755 *
756 */
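// The "< 20 arguments" cutoff above corresponds to the at-most 16 stack words
// (args 3..18) that the Thumb2 vldm/vstm path below can move in one pair
// (regs_left_to_pass_via_stack <= 16); larger counts fall back to the memcpy
// path (x86 uses its own copy loop for all counts).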
757int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
758                                LIR** pcrLabel, NextCallInsn next_call_insn,
759                                const MethodReference& target_method,
760                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
761                                InvokeType type, bool skip_this) {
762  // If we can treat it as non-range (Jumbo ops will use range form)
763  if (info->num_arg_words <= 5)
764    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
765                                next_call_insn, target_method, vtable_idx,
766                                direct_code, direct_method, type, skip_this);
767  /*
768   * First load the non-register arguments.  Both forms expect all
769   * of the source arguments to be in their home frame location, so
770   * scan the s_reg names and flush any that have been promoted to physical
771   * registers back to frame backing storage.
772   */
773  // Scan the rest of the args - if in phys_reg flush to memory
774  for (int next_arg = 0; next_arg < info->num_arg_words;) {
775    RegLocation loc = info->args[next_arg];
776    if (loc.wide) {
777      loc = UpdateLocWide(loc);
778      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
779        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
780                          loc.reg.GetReg(), loc.reg.GetHighReg());
781      }
782      next_arg += 2;
783    } else {
784      loc = UpdateLoc(loc);
785      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
786        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
787                      loc.reg.GetReg(), kWord);
788      }
789      next_arg++;
790    }
791  }
792
793  // Logic below assumes that Method pointer is at offset zero from SP.
794  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
795
796  // The first 3 arguments are passed via registers.
797  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
798  // get size of uintptr_t or size of object reference according to model being used.
799  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
800  int start_offset = SRegOffset(info->args[3].s_reg_low);
801  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
802  DCHECK_GT(regs_left_to_pass_via_stack, 0);
803
804  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
805    // Use vldm/vstm pair using kArg3 as a temp
806    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
807                             direct_code, direct_method, type);
808    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
809    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
810    // TUNING: loosen barrier
811    ld->u.m.def_mask = ENCODE_ALL;
812    SetMemRefType(ld, true /* is_load */, kDalvikReg);
813    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
814                             direct_code, direct_method, type);
815    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
816    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
817                             direct_code, direct_method, type);
818    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
819    SetMemRefType(st, false /* is_load */, kDalvikReg);
820    st->u.m.def_mask = ENCODE_ALL;
821    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
822                             direct_code, direct_method, type);
823  } else if (cu_->instruction_set == kX86) {
824    int current_src_offset = start_offset;
825    int current_dest_offset = outs_offset;
826
827    while (regs_left_to_pass_via_stack > 0) {
828      // This is based on the knowledge that the stack itself is 16-byte aligned.
829      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
830      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
831      size_t bytes_to_move;
832
833      /*
834       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
835       * 128-bit move, because we won't get another chance to try to align. If there are more than
836       * 4 registers left to move, consider doing a 128-bit move only if either src or dest is aligned.
837       * We do this because we could potentially do a smaller move to align.
838       */
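      /*
       * Note: the 32-bit fallback below advances both offsets by 4 on each
       * iteration, so a copy that starts unaligned reaches a 16-byte boundary
       * on the source or destination side within a few iterations and can
       * then take the 128-bit path.
       */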
839      if (regs_left_to_pass_via_stack == 4 ||
840          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
841        // Moving 128-bits via xmm register.
842        bytes_to_move = sizeof(uint32_t) * 4;
843
844        // Allocate a free xmm temp. Since we are working through the calling sequence,
845        // we expect to have an xmm temporary available.
846        int temp = AllocTempDouble();
847        CHECK_GT(temp, 0);
848
849        LIR* ld1 = nullptr;
850        LIR* ld2 = nullptr;
851        LIR* st1 = nullptr;
852        LIR* st2 = nullptr;
853
854        /*
855         * The logic is similar for both loads and stores. If we have 16-byte alignment,
856         * do an aligned move. If we have 8-byte alignment, then do the move in two
857         * parts. This approach prevents possible cache line splits. Finally, fall back
858         * to doing an unaligned move. In most cases we likely won't split the cache
859         * line but we cannot prove it and thus take a conservative approach.
860         */
861        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
862        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
863
864        if (src_is_16b_aligned) {
865          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
866        } else if (src_is_8b_aligned) {
867          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
868          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1), kMovHi128FP);
869        } else {
870          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
871        }
872
873        if (dest_is_16b_aligned) {
874          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
875        } else if (dest_is_8b_aligned) {
876          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
877          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), temp, kMovHi128FP);
878        } else {
879          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
880        }
881
882        // TODO If we could keep track of aliasing information for memory accesses that are wider
883        // than 64-bit, we wouldn't need to set up a barrier.
884        if (ld1 != nullptr) {
885          if (ld2 != nullptr) {
886            // For 64-bit load we can actually set up the aliasing information.
887            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
888            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
889          } else {
890            // Set barrier for 128-bit load.
891            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
892            ld1->u.m.def_mask = ENCODE_ALL;
893          }
894        }
895        if (st1 != nullptr) {
896          if (st2 != nullptr) {
897            // For 64-bit store we can actually set up the aliasing information.
898            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
899            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
900          } else {
901            // Set barrier for 128-bit store.
902            SetMemRefType(st1, false /* is_load */, kDalvikReg);
903            st1->u.m.def_mask = ENCODE_ALL;
904          }
905        }
906
907        // Free the temporary used for the data movement.
908        FreeTemp(temp);
909      } else {
910        // Moving 32-bits via general purpose register.
911        bytes_to_move = sizeof(uint32_t);
912
913        // Instead of allocating a new temp, simply reuse one of the registers being used
914        // for argument passing.
915        int temp = TargetReg(kArg3);
916
917        // Now load the argument VR and store to the outs.
918        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
919        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
920      }
921
922      current_src_offset += bytes_to_move;
923      current_dest_offset += bytes_to_move;
924      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
925    }
926  } else {
927    // Generate memcpy
928    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
929    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
930    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
931                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
932  }
933
934  call_state = LoadArgRegs(info, call_state, next_call_insn,
935                           target_method, vtable_idx, direct_code, direct_method,
936                           type, skip_this);
937
938  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
939                           direct_code, direct_method, type);
940  if (pcrLabel) {
941    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
942  }
943  return call_state;
944}
945
946RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
947  RegLocation res;
948  if (info->result.location == kLocInvalid) {
949    res = GetReturn(false);
950  } else {
951    res = info->result;
952  }
953  return res;
954}
955
956RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
957  RegLocation res;
958  if (info->result.location == kLocInvalid) {
959    res = GetReturnWide(false);
960  } else {
961    res = info->result;
962  }
963  return res;
964}
965
966bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
967  if (cu_->instruction_set == kMips) {
968    // TODO - add Mips implementation
969    return false;
970  }
971  // Location of reference to data array
972  int value_offset = mirror::String::ValueOffset().Int32Value();
973  // Location of count
974  int count_offset = mirror::String::CountOffset().Int32Value();
975  // Starting offset within data array
976  int offset_offset = mirror::String::OffsetOffset().Int32Value();
977  // Start of char data within array_
978  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
979
980  RegLocation rl_obj = info->args[0];
981  RegLocation rl_idx = info->args[1];
982  rl_obj = LoadValue(rl_obj, kCoreReg);
983  // X86 wants to avoid putting a constant index into a register.
984  if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
985    rl_idx = LoadValue(rl_idx, kCoreReg);
986  }
987  int reg_max;
988  GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
989  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
990  LIR* launch_pad = NULL;
991  int reg_off = INVALID_REG;
992  int reg_ptr = INVALID_REG;
993  if (cu_->instruction_set != kX86) {
994    reg_off = AllocTemp();
995    reg_ptr = AllocTemp();
996    if (range_check) {
997      reg_max = AllocTemp();
998      LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
999    }
1000    LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
1001    LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
1002    if (range_check) {
1003      // Set up a launch pad to allow retry in case of bounds violation
1004      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
1005      intrinsic_launchpads_.Insert(launch_pad);
1006      OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
1007      FreeTemp(reg_max);
1008      OpCondBranch(kCondUge, launch_pad);
1009    }
1010    OpRegImm(kOpAdd, reg_ptr, data_offset);
1011  } else {
1012    if (range_check) {
1013      // On x86, we can compare to memory directly
1014      // Set up a launch pad to allow retry in case of bounds violation
1015      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
1016      intrinsic_launchpads_.Insert(launch_pad);
1017      if (rl_idx.is_const) {
1018        OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
1019                          mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
1020      } else {
1021        OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
1022        OpCondBranch(kCondUge, launch_pad);
1023      }
1024    }
1025    reg_off = AllocTemp();
1026    reg_ptr = AllocTemp();
1027    LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
1028    LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
1029  }
1030  if (rl_idx.is_const) {
1031    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1032  } else {
1033    OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
1034  }
1035  FreeTemp(rl_obj.reg.GetReg());
1036  if (rl_idx.location == kLocPhysReg) {
1037    FreeTemp(rl_idx.reg.GetReg());
1038  }
1039  RegLocation rl_dest = InlineTarget(info);
1040  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1041  if (cu_->instruction_set != kX86) {
1042    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
1043  } else {
1044    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
1045                        INVALID_REG, kUnsignedHalf, INVALID_SREG);
1046  }
1047  FreeTemp(reg_off);
1048  FreeTemp(reg_ptr);
1049  StoreValue(rl_dest, rl_result);
1050  if (range_check) {
1051    launch_pad->operands[2] = 0;  // no resumption
1052  }
1053  // Record that we've already inlined & null checked
1054  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
1055  return true;
1056}
1057
1058// Generates an inlined String.is_empty or String.length.
1059bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1060  if (cu_->instruction_set == kMips) {
1061    // TODO - add Mips implementation
1062    return false;
1063  }
1064  // dst = src.length();
1065  RegLocation rl_obj = info->args[0];
1066  rl_obj = LoadValue(rl_obj, kCoreReg);
1067  RegLocation rl_dest = InlineTarget(info);
1068  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1069  GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
1070  LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg());
1071  if (is_empty) {
1072    // dst = (dst == 0);
1073    if (cu_->instruction_set == kThumb2) {
1074      int t_reg = AllocTemp();
1075      OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
1076      OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
1077    } else {
1078      DCHECK_EQ(cu_->instruction_set, kX86);
1079      OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
1080      OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
1081    }
1082  }
1083  StoreValue(rl_dest, rl_result);
1084  return true;
1085}
1086
1087bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1088  if (cu_->instruction_set == kMips) {
1089    // TODO - add Mips implementation
1090    return false;
1091  }
1092  RegLocation rl_src_i = info->args[0];
1093  RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1094  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1095  if (size == kLong) {
1096    RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
1097    int r_i_low = rl_i.reg.GetReg();
1098    if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
1099      // The first REV would clobber rl_result.reg.GetReg(), so save the value in a temp for the second REV.
1100      r_i_low = AllocTemp();
1101      OpRegCopy(r_i_low, rl_i.reg.GetReg());
1102    }
1103    OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
1104    OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
1105    if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
1106      FreeTemp(r_i_low);
1107    }
1108    StoreValueWide(rl_dest, rl_result);
1109  } else {
1110    DCHECK(size == kWord || size == kSignedHalf);
1111    OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
1112    RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
1113    OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
1114    StoreValue(rl_dest, rl_result);
1115  }
1116  return true;
1117}
1118
1119bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1120  if (cu_->instruction_set == kMips) {
1121    // TODO - add Mips implementation
1122    return false;
1123  }
1124  RegLocation rl_src = info->args[0];
1125  rl_src = LoadValue(rl_src, kCoreReg);
1126  RegLocation rl_dest = InlineTarget(info);
1127  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1128  int sign_reg = AllocTemp();
1129  // abs(x): let y = x >> 31 (all sign bits); abs(x) = (x + y) ^ y.
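  // e.g. x = -5: y = -1, (x + y) ^ y = (-6) ^ (-1) = 5; for x >= 0, y = 0 and x is unchanged.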
1130  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
1131  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
1132  OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
1133  StoreValue(rl_dest, rl_result);
1134  return true;
1135}
1136
1137bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1138  if (cu_->instruction_set == kMips) {
1139    // TODO - add Mips implementation
1140    return false;
1141  }
1142  if (cu_->instruction_set == kThumb2) {
1143    RegLocation rl_src = info->args[0];
1144    rl_src = LoadValueWide(rl_src, kCoreReg);
1145    RegLocation rl_dest = InlineTargetWide(info);
1146    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1147    int sign_reg = AllocTemp();
1148    // abs(x): let y = x >> 63 (all sign bits); abs(x) = (x + y) ^ y, computed with add/adc and two xors.
1149    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
1150    OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
1151    OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
1152    OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
1153    OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
1154    StoreValueWide(rl_dest, rl_result);
1155    return true;
1156  } else {
1157    DCHECK_EQ(cu_->instruction_set, kX86);
1158    // Reuse source registers to avoid running out of temps
1159    RegLocation rl_src = info->args[0];
1160    rl_src = LoadValueWide(rl_src, kCoreReg);
1161    RegLocation rl_dest = InlineTargetWide(info);
1162    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1163    OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
1164    FreeTemp(rl_src.reg.GetReg());
1165    FreeTemp(rl_src.reg.GetHighReg());
1166    int sign_reg = AllocTemp();
1167    // abs(x): let y = x >> 63 (all sign bits); abs(x) = (x + y) ^ y, computed with add/adc and two xors.
1168    OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31);
1169    OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg);
1170    OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg);
1171    OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
1172    OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
1173    StoreValueWide(rl_dest, rl_result);
1174    return true;
1175  }
1176}
1177
1178bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
1179  if (cu_->instruction_set == kMips) {
1180    // TODO - add Mips implementation
1181    return false;
1182  }
1183  RegLocation rl_src = info->args[0];
1184  rl_src = LoadValue(rl_src, kCoreReg);
1185  RegLocation rl_dest = InlineTarget(info);
1186  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1187  int signMask = AllocTemp();
1188  LoadConstant(signMask, 0x7fffffff);
1189  OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask);
1190  FreeTemp(signMask);
1191  StoreValue(rl_dest, rl_result);
1192  return true;
1193}
1194
1195bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
1196  if (cu_->instruction_set == kMips) {
1197    // TODO - add Mips implementation
1198    return false;
1199  }
1200  RegLocation rl_src = info->args[0];
1201  rl_src = LoadValueWide(rl_src, kCoreReg);
1202  RegLocation rl_dest = InlineTargetWide(info);
1203  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1204  OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
1205  FreeTemp(rl_src.reg.GetReg());
1206  FreeTemp(rl_src.reg.GetHighReg());
1207  int signMask = AllocTemp();
1208  LoadConstant(signMask, 0x7fffffff);
1209  OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask);
1210  FreeTemp(signMask);
1211  StoreValueWide(rl_dest, rl_result);
1212  return true;
1213}
1214
1215bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1216  if (cu_->instruction_set == kMips) {
1217    // TODO - add Mips implementation
1218    return false;
1219  }
1220  RegLocation rl_src = info->args[0];
1221  RegLocation rl_dest = InlineTarget(info);
1222  StoreValue(rl_dest, rl_src);
1223  return true;
1224}
1225
1226bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1227  if (cu_->instruction_set == kMips) {
1228    // TODO - add Mips implementation
1229    return false;
1230  }
1231  RegLocation rl_src = info->args[0];
1232  RegLocation rl_dest = InlineTargetWide(info);
1233  StoreValueWide(rl_dest, rl_src);
1234  return true;
1235}
1236
1237/*
1238 * Fast string.index_of(I) & (II).  Tests for simple case of char <= 0xffff,
1239 * otherwise bails to standard library code.
1240 */
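/*
 * The pIndexOf runtime helper is called with (string, char, start) in
 * kArg0..kArg2, so the fixed call temps are locked and the arguments loaded
 * directly below rather than going through LoadArgRegs.
 */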
1241bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1242  if (cu_->instruction_set == kMips) {
1243    // TODO - add Mips implementation
1244    return false;
1245  }
1246  ClobberCallerSave();
1247  LockCallTemps();  // Using fixed registers
1248  int reg_ptr = TargetReg(kArg0);
1249  int reg_char = TargetReg(kArg1);
1250  int reg_start = TargetReg(kArg2);
1251
1252  RegLocation rl_obj = info->args[0];
1253  RegLocation rl_char = info->args[1];
1254  RegLocation rl_start = info->args[2];
1255  LoadValueDirectFixed(rl_obj, reg_ptr);
1256  LoadValueDirectFixed(rl_char, reg_char);
1257  if (zero_based) {
1258    LoadConstant(reg_start, 0);
1259  } else {
1260    LoadValueDirectFixed(rl_start, reg_start);
1261  }
1262  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
1263  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
1264  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
1265  intrinsic_launchpads_.Insert(launch_pad);
1266  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
1267  // NOTE: not a safepoint
1268  OpReg(kOpBlx, r_tgt);
1269  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1270  launch_pad->operands[2] = WrapPointer(resume_tgt);
1271  // Record that we've already inlined & null checked
1272  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
1273  RegLocation rl_return = GetReturn(false);
1274  RegLocation rl_dest = InlineTarget(info);
1275  StoreValue(rl_dest, rl_return);
1276  return true;
1277}
1278
1279/* Fast string.compareTo(Ljava/lang/String;)I. */
1280bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1281  if (cu_->instruction_set == kMips) {
1282    // TODO - add Mips implementation
1283    return false;
1284  }
1285  ClobberCallerSave();
1286  LockCallTemps();  // Using fixed registers
1287  int reg_this = TargetReg(kArg0);
1288  int reg_cmp = TargetReg(kArg1);
1289
1290  RegLocation rl_this = info->args[0];
1291  RegLocation rl_cmp = info->args[1];
1292  LoadValueDirectFixed(rl_this, reg_this);
1293  LoadValueDirectFixed(rl_cmp, reg_cmp);
1294  int r_tgt = (cu_->instruction_set != kX86) ?
1295      LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
1296  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
1297  // TUNING: check if rl_cmp.s_reg_low is already null checked
1298  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
1299  intrinsic_launchpads_.Insert(launch_pad);
1300  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
1301  // NOTE: not a safepoint
1302  if (cu_->instruction_set != kX86) {
1303    OpReg(kOpBlx, r_tgt);
1304  } else {
1305    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
1306  }
1307  launch_pad->operands[2] = 0;  // No return possible
1308  // Record that we've already inlined & null checked
1309  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
1310  RegLocation rl_return = GetReturn(false);
1311  RegLocation rl_dest = InlineTarget(info);
1312  StoreValue(rl_dest, rl_return);
1313  return true;
1314}
1315
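// Inline Thread.currentThread(): load the thread's managed peer object, either from the
// dedicated thread register (ARM/MIPS) or via a thread-local memory operand on x86.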
1316bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1317  RegLocation rl_dest = InlineTarget(info);
1318  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1319  ThreadOffset offset = Thread::PeerOffset();
1320  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
1321    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
1322  } else {
1323    CHECK(cu_->instruction_set == kX86);
1324    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
1325  }
1326  StoreValue(rl_dest, rl_result);
1327  return true;
1328}
1329
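// Inline Unsafe get intrinsics: load a 32-bit or 64-bit value from object + offset,
// emitting a load-load barrier for the volatile variants.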
1330bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1331                                  bool is_long, bool is_volatile) {
1332  if (cu_->instruction_set == kMips) {
1333    // TODO - add Mips implementation
1334    return false;
1335  }
1336  // Unused - RegLocation rl_src_unsafe = info->args[0];
1337  RegLocation rl_src_obj = info->args[1];  // Object
1338  RegLocation rl_src_offset = info->args[2];  // long low
1339  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
1340  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1341  if (is_volatile) {
1342    GenMemBarrier(kLoadLoad);
1343  }
1344  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
1345  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1346  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1347  if (is_long) {
1348    OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
1349    LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
1350    StoreValueWide(rl_dest, rl_result);
1351  } else {
1352    LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord);
1353    StoreValue(rl_dest, rl_result);
1354  }
1355  return true;
1356}
1357
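// Inline Unsafe put intrinsics: store a 32-bit or 64-bit value at object + offset. Ordered
// and volatile stores are preceded by a store-store barrier, volatile stores are additionally
// followed by a store-load barrier, and object stores mark the GC card.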
1358bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1359                                  bool is_object, bool is_volatile, bool is_ordered) {
1360  if (cu_->instruction_set == kMips) {
1361    // TODO - add Mips implementation
1362    return false;
1363  }
1364  // Unused - RegLocation rl_src_unsafe = info->args[0];
1365  RegLocation rl_src_obj = info->args[1];  // Object
1366  RegLocation rl_src_offset = info->args[2];  // long low
1367  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
1368  RegLocation rl_src_value = info->args[4];  // value to store
1369  if (is_volatile || is_ordered) {
1370    GenMemBarrier(kStoreStore);
1371  }
1372  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
1373  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1374  RegLocation rl_value;
1375  if (is_long) {
1376    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1377    OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
1378    StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
1379  } else {
1380    rl_value = LoadValue(rl_src_value, kCoreReg);
1381    StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord);
1382  }
1383
1384  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1385  FreeTemp(rl_offset.reg.GetReg());
1386  if (is_volatile) {
1387    GenMemBarrier(kStoreLoad);
1388  }
1389  if (is_object) {
1390    MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
1391  }
1392  return true;
1393}
1394
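// Generate code for an invoke. Intrinsics recognized by the method inliner are expanded in
// place; otherwise ComputeInvokeInfo selects a fast or slow dispatch path, the per-invoke-kind
// next_call_insn state machine is driven (interleaved with the argument loads) until it
// signals completion, and the call itself is emitted, followed by handling of any pending
// MOVE_RESULT.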
1395void Mir2Lir::GenInvoke(CallInfo* info) {
1396  if (!(info->opt_flags & MIR_INLINED)) {
1397    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1398    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1399        ->GenIntrinsic(this, info)) {
1400      return;
1401    }
1402  }
1403  InvokeType original_type = info->type;  // Saved because ComputeInvokeInfo may mutate info->type.
1404  int call_state = 0;
1405  LIR* null_ck;
1406  LIR** p_null_ck = NULL;
1407  NextCallInsn next_call_insn;
1408  FlushAllRegs();  /* Everything to home location */
1409  // Explicit register usage
1410  LockCallTemps();
1411
1412  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
1413  MethodReference target_method(cUnit->GetDexFile(), info->index);
1414  int vtable_idx;
1415  uintptr_t direct_code;
1416  uintptr_t direct_method;
1417  bool skip_this;
1418  bool fast_path =
1419      cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
1420                                              current_dalvik_offset_,
1421                                              true, true,
1422                                              &info->type, &target_method,
1423                                              &vtable_idx,
1424                                              &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
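  // Select the per-invoke-kind call-sequence generator; the slow-path variants go through
  // the runtime's resolution and access-check path.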
1425  if (info->type == kInterface) {
1426    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1427    skip_this = fast_path;
1428  } else if (info->type == kDirect) {
1429    if (fast_path) {
1430      p_null_ck = &null_ck;
1431    }
1432    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1433    skip_this = false;
1434  } else if (info->type == kStatic) {
1435    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1436    skip_this = false;
1437  } else if (info->type == kSuper) {
1438    DCHECK(!fast_path);  // Fast path is a direct call.
1439    next_call_insn = NextSuperCallInsnSP;
1440    skip_this = false;
1441  } else {
1442    DCHECK_EQ(info->type, kVirtual);
1443    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1444    skip_this = fast_path;
1445  }
1446  if (!info->is_range) {
1447    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1448                                      next_call_insn, target_method,
1449                                      vtable_idx, direct_code, direct_method,
1450                                      original_type, skip_this);
1451  } else {
1452    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1453                                    next_call_insn, target_method, vtable_idx,
1454                                    direct_code, direct_method, original_type,
1455                                    skip_this);
1456  }
1457  // Finish up any part of the call sequence that was not interleaved with the argument loads.
1458  while (call_state >= 0) {
1459    call_state = next_call_insn(cu_, info, call_state, target_method,
1460                                vtable_idx, direct_code, direct_method,
1461                                original_type);
1462  }
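  // Emit the call itself: a register-indirect branch on ARM/MIPS; on x86 either a relative
  // call fixed up by the linker, a call through the method's quick code entry point, or an
  // invoke trampoline on the slow path.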
1463  LIR* call_inst;
1464  if (cu_->instruction_set != kX86) {
1465    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
1466  } else {
1467    if (fast_path) {
1468      if (direct_code == static_cast<unsigned int>(-1)) {
1469        // Emit a relative call and have the linker fix up the target.
1470        call_inst =
1471          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(
1472              target_method.dex_method_index, info->type);
1473      } else {
1474        call_inst = OpMem(kOpBlx, TargetReg(kArg0),
1475                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
1476      }
1477    } else {
1478      ThreadOffset trampoline(-1);
1479      switch (info->type) {
1480      case kInterface:
1481        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
1482        break;
1483      case kDirect:
1484        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
1485        break;
1486      case kStatic:
1487        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
1488        break;
1489      case kSuper:
1490        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
1491        break;
1492      case kVirtual:
1493        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
1494        break;
1495      default:
1496        LOG(FATAL) << "Unexpected invoke type";
1497      }
1498      call_inst = OpThreadMem(kOpBlx, trampoline);
1499    }
1500  }
1501  MarkSafepointPC(call_inst);
1502
1503  ClobberCallerSave();
1504  if (info->result.location != kLocInvalid) {
1505    // A MOVE_RESULT follows this invoke - store the return value now.
1506    if (info->result.wide) {
1507      RegLocation ret_loc = GetReturnWide(info->result.fp);
1508      StoreValueWide(info->result, ret_loc);
1509    } else {
1510      RegLocation ret_loc = GetReturn(info->result.fp);
1511      StoreValue(info->result, ret_loc);
1512    }
1513  }
1514}
1515
1516}  // namespace art
1517