gen_invoke.cc revision f096aad9203d7c50b2f9cbe1c1215a50c265a059
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/frontend.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
#include "x86/codegen_x86.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper.  Because x86
 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
 * load arguments between the two parts.
 */
int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
}

/* NOTE: if r_tgt is a temp, it will be freed following use */
LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
  LIR* call_inst;
  if (cu_->instruction_set == kX86) {
    call_inst = OpThreadMem(kOpBlx, helper_offset);
  } else {
    call_inst = OpReg(kOpBlx, r_tgt);
    FreeTemp(r_tgt);
  }
  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}
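
/*
 * Illustrative use of the two-part split above (a sketch; it mirrors the
 * CallRuntimeHelper* wrappers that follow, not additional code):
 *
 *   int r_tgt = CallHelperSetup(helper_offset);   // NOP on x86
 *   LoadConstant(TargetReg(kArg0), arg0);         // argument loads scheduled here
 *   ClobberCallerSave();
 *   CallHelper(r_tgt, helper_offset, safepoint_pc);
 */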

void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                           bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(kArg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
  }
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1));
  } else {
    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
                                              int arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg1), arg0);
  if (TargetReg(kArg0) != arg0) {
    OpRegCopy(TargetReg(kArg0), arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1));
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                                    RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg1), arg0);
  if (TargetReg(kArg0) != arg0) {
    OpRegCopy(TargetReg(kArg0), arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
                                 arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
                               arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                         int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg2), arg2);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
                                            int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg2), arg2);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                         int arg0, RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_EQ(arg1.wide, 0U);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset helper_offset,
                                                                 RegLocation arg0, RegLocation arg1,
                                                                 RegLocation arg2,
                                                                 bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_EQ(arg0.wide, 0U);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  DCHECK_EQ(arg1.wide, 0U);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  DCHECK_EQ(arg2.wide, 0U);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
  rl_src.home = false;
  MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0) {
    return;
  }

  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument.  It is also possible that long and double arguments
   * end up half-promoted.  In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    int reg = GetArgMappingToPhysicalReg(i);

    if (reg != INVALID_REG) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, reg);
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, reg);
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
          /*
           * In Arm, a double is represented as a pair of consecutive single float
           * registers starting at an even number.  It's possible that both Dalvik vRegs
           * representing the incoming double were independently promoted as singles - but
           * not in a form usable as a double.  If so, we need to flush - even though the
           * incoming arg appears fully in register.  At this point in the code, both
           * halves of the double are promoted.  Make sure they are in a usable form.
           */
          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
          int low_reg = promotion_map_[lowreg_index].FpReg;
          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
            need_flush = true;
          }
        }
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}
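
/*
 * Worked example of the half-promotion flush above (a sketch): suppose a wide
 * in arrives as the vreg pair v4/v5 and the promotion pass gave v4 a physical
 * register but left v5 in memory.  p_map and v_map then disagree on
 * core_location, need_flush becomes true, and the register half is stored
 * back to its frame slot so later wide loads see a consistent home location.
 */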

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      if (direct_code != static_cast<unsigned int>(-1)) {
        if (cu->instruction_set != kX86) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        }
      } else if (cu->instruction_set != kX86) {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadCodeAddress(target_method.dex_method_index, type, kInvokeTgt);
      }
      if (direct_method != static_cast<unsigned int>(-1)) {
        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
      } else {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadMethodAddress(target_method.dex_method_index, type, kArg0);
      }
      break;
    default:
      return -1;
    }
  } else {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      // TUNING: we can save a reg copy if Method* has been promoted.
      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
      break;
    case 1:  // Get method->dex_cache_resolved_methods_
      cg->LoadWordDisp(cg->TargetReg(kArg0),
        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
      // Set up direct code if known.
      if (direct_code != 0) {
        if (direct_code != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        } else if (cu->instruction_set != kX86) {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
          cg->LoadCodeAddress(target_method.dex_method_index, type, kInvokeTgt);
        }
      }
      break;
    case 2:  // Grab target method*
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                           (target_method.dex_method_index * 4),
                       cg->TargetReg(kArg0));
      break;
    case 3:  // Grab the code from the method*
      if (cu->instruction_set != kX86) {
        if (direct_code == 0) {
          cg->LoadWordDisp(cg->TargetReg(kArg0),
                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        break;
      }
      // Intentional fallthrough for x86
    default:
      return -1;
    }
  }
  return state + 1;
}
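
/*
 * Sketch of the state machine above for the not-fully-direct case (one
 * instruction is emitted per call to NextSDCallInsn):
 *
 *   state 0: kArg0 <- current Method*
 *   state 1: kArg0 <- kArg0->dex_cache_resolved_methods_
 *   state 2: kArg0 <- resolved_methods[target_method.dex_method_index]
 *   state 3: kInvokeTgt <- kArg0->entry_point_from_quick_compiled_code_
 *            (skipped on x86, which calls through memory instead)
 */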

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard LoadArgRegs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
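
/*
 * The virtual dispatch chain emitted above, in load form (a sketch):
 *
 *   kArg1      <- this                                  // state 0
 *   kInvokeTgt <- this->klass_                          // state 1, after null check
 *   kInvokeTgt <- klass_->vtable_                       // state 2
 *   kArg0      <- vtable_[method_idx]                   // state 3
 *   kInvokeTgt <- method->quick_code_entry_point        // state 4 (non-x86)
 */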

/*
 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
 * more than one interface method maps to the same index. Note also that we'll load the first
 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t unused,
                                 uintptr_t direct_method, InvokeType unused2) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  switch (state) {
    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
      }
      break;
    case 1: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 2:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // Get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 4:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
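
/*
 * IMT dispatch above differs from vtable dispatch in two ways (a sketch of
 * the contract): the table index is method_idx % ClassLinker::kImtSize
 * rather than a raw vtable index, and a colliding slot holds
 * art_quick_imt_conflict_trampoline, which receives the intended dex method
 * index via kHiddenArg (mirrored into kHiddenFpArg on x86).
 */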

static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
                            int state, const MethodReference& target_method,
                            uint32_t method_idx) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86) {
      // Load trampoline target
      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
    return 1;
  }
  return -1;
}

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t unused, uintptr_t unused2,
                                uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t unused, uintptr_t unused2,
                                uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t unused, uintptr_t unused2,
                               uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t unused, uintptr_t unused2,
                           uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t unused, uintptr_t unused2,
                                                uintptr_t unused3, InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
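
/*
 * Example of the mapping performed by LoadArgRegs (a sketch): for a
 * non-range invoke with args (int, long, int) and skip_this == false,
 * arg word 0 goes to kArg1 and the wide arg occupies the kArg2/kArg3 pair;
 * the trailing int no longer fits in registers, so the callers below have
 * already placed it in the out area on the stack.
 */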

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                  const MethodReference& target_method,
                                  uint32_t vtable_idx, uintptr_t direct_code,
                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
  RegLocation rl_arg;

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);

  DCHECK_LE(info->num_arg_words, 5);
  if (info->num_arg_words > 3) {
    int32_t next_use = 3;
    // Detect special case of wide arg spanning arg3/arg4
    RegLocation rl_use0 = info->args[0];
    RegLocation rl_use1 = info->args[1];
    RegLocation rl_use2 = info->args[2];
    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
        rl_use2.wide) {
      int reg = -1;
      // Wide spans, we need the 2nd half of uses[2].
      rl_arg = UpdateLocWide(rl_use2);
      if (rl_arg.location == kLocPhysReg) {
        reg = rl_arg.reg.GetHighReg();
      } else {
        // kArg2 & kArg3 can safely be used here
        reg = TargetReg(kArg3);
        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      next_use++;
    }
    // Loop through the rest
    while (next_use < info->num_arg_words) {
      int low_reg;
      int high_reg = -1;
      rl_arg = info->args[next_use];
      rl_arg = UpdateRawLoc(rl_arg);
      if (rl_arg.location == kLocPhysReg) {
        low_reg = rl_arg.reg.GetReg();
        if (rl_arg.wide) {
          high_reg = rl_arg.reg.GetHighReg();
        }
      } else {
        low_reg = TargetReg(kArg2);
        if (rl_arg.wide) {
          high_reg = TargetReg(kArg3);
          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
        } else {
          LoadValueDirectFixed(rl_arg, low_reg);
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      int outs_offset = (next_use + 1) * 4;
      if (rl_arg.wide) {
        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
        next_use += 2;
      } else {
        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
        next_use++;
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
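
/*
 * Note on the special case above (a sketch): with args (int, int, long),
 * the wide pair spans arg words 3 and 4, so only its high half must be
 * stored to the out area at offset (next_use + 1) * 4; the low half still
 * travels in kArg3 via LoadArgRegs.
 */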

/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments.  Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.reg.GetReg(), loc.reg.GetHighReg());
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.reg.GetReg(), kWord);
      }
      next_arg++;
    }
  }

  // Logic below assumes that Method pointer is at offset zero from SP.
  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

  // The first 3 arguments are passed via registers.
  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
  // get size of uintptr_t or size of object reference according to model being used.
  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
  DCHECK_GT(regs_left_to_pass_via_stack, 0);

  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
    // Use vldm/vstm pair using kArg3 as a temp
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    // TUNING: loosen barrier
    ld->u.m.def_mask = ENCODE_ALL;
    SetMemRefType(ld, true /* is_load */, kDalvikReg);
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    SetMemRefType(st, false /* is_load */, kDalvikReg);
    st->u.m.def_mask = ENCODE_ALL;
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  } else if (cu_->instruction_set == kX86) {
    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
       * a 128-bit move because we won't get the chance to try to align. If there are more than
       * 4 registers left to move, consider a 128-bit move only if either src or dest is aligned.
       * We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available.
        int temp = AllocTempDouble();
        CHECK_GT(temp, 0);

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
                            kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), temp,
                            kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
                                    true);
          } else {
            // Set barrier for 128-bit load.
            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
            ld1->u.m.def_mask = ENCODE_ALL;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
                                    true);
          } else {
            // Set barrier for 128-bit store.
            SetMemRefType(st1, false /* is_load */, kDalvikReg);
            st1->u.m.def_mask = ENCODE_ALL;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        int temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
  } else {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
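
/*
 * Summary of the three block-copy strategies above (a sketch): Thumb2 copies
 * up to 16 argument words with a vldm/vstm pair addressed through kArg3;
 * x86 moves 128 bits at a time through an xmm temp when counts and alignment
 * allow, falling back to 32-bit copies through kArg3; everything else calls
 * the pMemcpy entrypoint.
 */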

RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturn(false);
  } else {
    res = info->result;
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturnWide(false);
  } else {
    res = info->result;
  }
  return res;
}

bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  // X86 wants to avoid putting a constant index into a register.
  if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
    rl_idx = LoadValue(rl_idx, kCoreReg);
  }
  int reg_max;
  GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
    LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondUge, launch_pad);
    }
    OpRegImm(kOpAdd, reg_ptr, data_offset);
  } else {
    if (range_check) {
      // On x86, we can compare to memory directly.
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
      intrinsic_launchpads_.Insert(launch_pad);
      if (rl_idx.is_const) {
        OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
                          mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
      } else {
        OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
        OpCondBranch(kCondUge, launch_pad);
      }
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
    LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
  }
  if (rl_idx.is_const) {
    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
  } else {
    OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
  }
  FreeTemp(rl_obj.reg.GetReg());
  if (rl_idx.location == kLocPhysReg) {
    FreeTemp(rl_idx.reg.GetReg());
  }
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (cu_->instruction_set != kX86) {
    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
  } else {
    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
                        INVALID_REG, kUnsignedHalf, INVALID_SREG);
  }
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}
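
/*
 * String field layout assumed by the inline above (a sketch): value_ holds a
 * reference to the backing char[], offset_ is the first character's index
 * within that array, and count_ is the length, so the loaded char lives at
 * value_ + data_offset + 2 * (offset_ + idx) on both code paths.
 */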

// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
  LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(),
               rl_result.reg.GetReg());
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      int t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
      OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
      OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
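
/*
 * Why the branch-free (dst == 0) forms above work (a sketch): on Thumb2, the
 * negate (0 - dst) sets carry exactly when dst == 0, so the following adc of
 * dst and -dst yields 1 for zero and 0 otherwise; on x86, (dst - 1) >> 31
 * (logical) exposes the borrow bit, which is 1 precisely when dst was 0,
 * since dst is a non-negative length here.
 */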

bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == kLong) {
    RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
    int r_i_low = rl_i.reg.GetReg();
    if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
      // First REV shall clobber rl_result.reg.GetReg(), save the value in a temp for the second REV.
      r_i_low = AllocTemp();
      OpRegCopy(r_i_low, rl_i.reg.GetReg());
    }
    OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
    OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
    if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
      FreeTemp(r_i_low);
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kWord || size == kSignedHalf);
    OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
    RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
    OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int sign_reg = AllocTemp();
  // abs(x): y = x >> 31 (arithmetic); result = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
  OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}
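
/*
 * Worked trace of the branch-free abs above (a sketch): y = x >> 31 is 0 for
 * x >= 0 and all ones (-1) for x < 0; then (x + y) ^ y is x when y == 0 and
 * ~(x - 1) == -x when y == -1.  The long variants below apply the same
 * identity with add/adc and the sign taken from the high word.
 */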

bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // abs(x): y = x >> 31 (arithmetic); result = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
    OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
    OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
                  rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
    FreeTemp(rl_src.reg.GetReg());
    FreeTemp(rl_src.reg.GetHighReg());
    int sign_reg = AllocTemp();
    // abs(x): y = x >> 31 (arithmetic); result = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31);
    OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg);
    OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
    OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}

bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int signMask = AllocTemp();
  LoadConstant(signMask, 0x7fffffff);
  OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask);
  FreeTemp(signMask);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
                rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
  FreeTemp(rl_src.reg.GetReg());
  FreeTemp(rl_src.reg.GetHighReg());
  int signMask = AllocTemp();
  LoadConstant(signMask, 0x7fffffff);
  OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask);
  FreeTemp(signMask);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

/*
 * Fast string.index_of(I) & (II).  Tests for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);
  int reg_char = TargetReg(kArg1);
  int reg_start = TargetReg(kArg2);

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
    LoadValueDirectFixed(rl_start, reg_start);
  }
  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  OpReg(kOpBlx, r_tgt);
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  launch_pad->operands[2] = WrapPointer(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
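
/*
 * The launch pad emitted above (a sketch of the slow-path contract): if the
 * char argument exceeds 0xFFFF, control branches to a kPseudoIntrinsicRetry
 * pad that re-issues the call as a normal invoke, then resumes at the
 * kPseudoTargetLabel recorded in operands[2].
 */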
1276
1277/* Fast string.compareTo(Ljava/lang/string;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
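  // On x86 the call below goes through thread memory, so no target register is loaded here.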
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
  intrinsic_launchpads_.Insert(launch_pad);
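  // Bail to the launch pad on a null comparand; that path throws, so no resume point is recorded.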
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  ThreadOffset offset = Thread::PeerOffset();
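  // The current thread's managed peer hangs off Thread*: ARM and MIPS load it via the
  // dedicated self register, while x86 uses a thread-relative move.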
  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
  } else {
    CHECK(cu_->instruction_set == kX86);
    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  if (is_volatile) {
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
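  // 64-bit case: form the address explicitly (base + offset) and do a wide load;
  // the 32-bit case can use an indexed load directly.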
  if (is_long) {
    OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
    LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kStoreStore);
  }
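  // The store-store barrier above covers both volatile and ordered puts; volatile puts
  // additionally get a store-load barrier after the store, below.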
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
    StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord);
  }

  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
  FreeTemp(rl_offset.reg.GetReg());
  if (is_volatile) {
    GenMemBarrier(kStoreLoad);
  }
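  // Object puts must dirty the card table so the GC's write barrier sees the new reference.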
  if (is_object) {
    MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
  }
  return true;
}

void Mir2Lir::GenInvoke(CallInfo* info) {
  if (!(info->opt_flags & MIR_INLINED)) {
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
        ->GenIntrinsic(this, info)) {
      return;
    }
  }
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = NULL;
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
  info->type = static_cast<InvokeType>(method_info.GetSharpType());
  bool fast_path = method_info.FastPath();
  bool skip_this;
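  // Pick the per-invoke-type state machine that materializes the callee.  When skip_this is
  // set, the state machine loads 'this' itself, so argument setup leaves it out.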
  if (info->type == kInterface) {
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = fast_path;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  MethodReference target_method = method_info.GetTargetMethod();
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method, method_info.VTableIndex(),
                                      method_info.DirectCode(), method_info.DirectMethod(),
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, method_info.VTableIndex(),
                                    method_info.DirectCode(), method_info.DirectMethod(),
                                    original_type, skip_this);
  }
  // Finish up any of the call sequence not interleaved in arg loading
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
  }
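  // Targets other than x86 call through the register the state machine loaded; x86 calls
  // through memory, or lets the linker fix up a relative call when no code address is known.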
  LIR* call_inst;
  if (cu_->instruction_set != kX86) {
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    if (fast_path) {
      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
        // We can have the linker fixup a call relative.
        call_inst =
          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(
              target_method.dex_method_index, info->type);
      } else {
        call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
      }
    } else {
      ThreadOffset trampoline(-1);
      switch (info->type) {
      case kInterface:
        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
        break;
      case kDirect:
        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
        break;
      case kStatic:
        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
        break;
      case kSuper:
        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
        break;
      case kVirtual:
        trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
        break;
      default:
        LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);

  ClobberCallerSave();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}

}  // namespace art