gen_invoke.cc revision 7934ac288acfb2552bb0b06ec1f61e5820d924a4
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex_file-inl.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "x86/codegen_x86.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper.  Because x86
 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
 * load arguments between the two parts.
 */
int Mir2Lir::CallHelperSetup(int helper_offset) {
  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
}

/* NOTE: if r_tgt is a temp, it will be freed following use */
LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) {
  LIR* call_inst;
  if (cu_->instruction_set == kX86) {
    call_inst = OpThreadMem(kOpBlx, helper_offset);
  } else {
    call_inst = OpReg(kOpBlx, r_tgt);
    FreeTemp(r_tgt);
  }
  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}

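// Typical call-site shape (a sketch; argument marshaling varies per helper,
// see the CallRuntimeHelper* wrappers below):
//   int r_tgt = CallHelperSetup(helper_offset);  // no-op on x86
//   LoadConstant(TargetReg(kArg0), arg0);        // marshal outgoing arguments
//   ClobberCalleeSave();
//   CallHelper(r_tgt, helper_offset, true);      // branch, then mark safepoint
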
void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(kArg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1));
  } else {
    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
                                              bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
                                 arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
                               arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
                                         int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg2), arg2);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
                                            int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg2), arg2);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
                                                         int arg0, RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.low_reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0)
    return;
  const int num_arg_regs = 3;
  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument.  It is also possible that long and double arguments
   * end up half-promoted.  In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    if (i < num_arg_regs) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if only half is promoted
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      TargetReg(arg_regs[i]), kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}

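/*
 * Example (a sketch): for a static method (JI)V, num_ins == 3 words.  The
 * long's two words arrive in kArg1/kArg2 and the int in kArg3; each word is
 * either copied to its promoted register or flushed to its frame slot above.
 */
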
/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_code = 0;
    direct_method = 0;
  }
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      if (direct_code != static_cast<unsigned int>(-1)) {
        cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
      } else {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      if (direct_method != static_cast<unsigned int>(-1)) {
        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
      } else {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      break;
    default:
      return -1;
    }
  } else {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      // TUNING: we can save a reg copy if Method* has been promoted.
      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
      break;
    case 1:  // Get method->dex_cache_resolved_methods_
      cg->LoadWordDisp(cg->TargetReg(kArg0),
        mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
      // Set up direct code if known.
      if (direct_code != 0) {
        if (direct_code != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
      }
      break;
    case 2:  // Grab target method*
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                           (target_method.dex_method_index * 4),
                       cg->TargetReg(kArg0));
      break;
    case 3:  // Grab the code from the method*
      if (cu->instruction_set != kX86) {
        if (direct_code == 0) {
          cg->LoadWordDisp(cg->TargetReg(kArg0),
                           mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        break;
      }
      // Intentional fallthrough for x86
    default:
      return -1;
    }
  }
  return state + 1;
}

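/*
 * Protocol sketch for the Next*CallInsn generators in this file: the caller
 * threads 'state' through repeated calls, interleaved with argument loads
 * (see LoadArgRegs and the GenDalvikArgs* routines below); each call emits
 * one step and returns state + 1, or -1 once the sequence is exhausted.
 */
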
/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard LoadArgRegs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}

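/*
 * Net effect of the fast virtual path above (a sketch):
 *   kArg1      = this
 *   kInvokeTgt = this->klass_
 *   kInvokeTgt = klass_->vtable_
 *   kArg0      = vtable_[method_idx]
 *   kInvokeTgt = method entry point (non-x86; x86 calls through memory)
 */
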
/*
 * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t unused, uintptr_t unused2,
                                 uintptr_t direct_method, InvokeType unused4) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_method = 0;
  }
  int trampoline = (cu->instruction_set == kX86) ? 0
      : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);

  if (direct_method != 0) {
    switch (state) {
      case 0:  // Load the trampoline target [sets kInvokeTgt].
        if (cu->instruction_set != kX86) {
          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
        }
        // Get the interface Method* [sets kArg0]
        if (direct_method != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->method_literal_list_,
                                          target_method.dex_method_index);
            data_target->operands[1] = kInterface;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        break;
      default:
        return -1;
    }
  } else {
    switch (state) {
      case 0:
        // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
        // Load the trampoline target [sets kInvokeTgt].
        if (cu->instruction_set != kX86) {
          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
        }
        break;
      case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                         cg->TargetReg(kArg0));
        break;
      case 2:  // Grab target method* [set/use kArg0]
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                             (target_method.dex_method_index * 4),
                         cg->TargetReg(kArg0));
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}

static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
                            int state, const MethodReference& target_method,
                            uint32_t method_idx) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; in that event, we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86) {
      // Load trampoline target
      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
    return 1;
  }
  return -1;
}

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t method_idx,
                                uintptr_t unused, uintptr_t unused2,
                                InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t method_idx, uintptr_t unused,
                                uintptr_t unused2, InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t method_idx, uintptr_t unused,
                               uintptr_t unused2, InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t method_idx, uintptr_t unused,
                           uintptr_t unused2, InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t unused,
                                                uintptr_t unused2, uintptr_t unused3,
                                                InvokeType unused4) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}

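/*
 * Example (a sketch): for args (int, long, int), the int goes in kArg1 and
 * the long pair in kArg2/kArg3; the final int stays in its frame slot.  For
 * (int, int, long), only the long's low half fits in kArg3; its high half
 * is passed in the frame (flushed by the GenDalvikArgs* routines below).
 */
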
/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                  const MethodReference& target_method,
                                  uint32_t vtable_idx, uintptr_t direct_code,
                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
  RegLocation rl_arg;

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);

  DCHECK_LE(info->num_arg_words, 5);
  if (info->num_arg_words > 3) {
    int32_t next_use = 3;
    // Detect special case of wide arg spanning arg3/arg4
    RegLocation rl_use0 = info->args[0];
    RegLocation rl_use1 = info->args[1];
    RegLocation rl_use2 = info->args[2];
    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
      rl_use2.wide) {
      int reg = -1;
      // Wide spans, we need the 2nd half of uses[2].
      rl_arg = UpdateLocWide(rl_use2);
      if (rl_arg.location == kLocPhysReg) {
        reg = rl_arg.high_reg;
      } else {
        // kArg2 & kArg3 can safely be used here
        reg = TargetReg(kArg3);
        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      next_use++;
    }
    // Loop through the rest
    while (next_use < info->num_arg_words) {
      int low_reg;
      int high_reg = -1;
      rl_arg = info->args[next_use];
      rl_arg = UpdateRawLoc(rl_arg);
      if (rl_arg.location == kLocPhysReg) {
        low_reg = rl_arg.low_reg;
        high_reg = rl_arg.high_reg;
      } else {
        low_reg = TargetReg(kArg2);
        if (rl_arg.wide) {
          high_reg = TargetReg(kArg3);
          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
        } else {
          LoadValueDirectFixed(rl_arg, low_reg);
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      int outs_offset = (next_use + 1) * 4;
      if (rl_arg.wide) {
        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
        next_use += 2;
      } else {
        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
        next_use++;
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}

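/*
 * Wide-span example (a sketch): for args (int, int, long), the long's words
 * occupy arg words 2/3, so its high half is stored to the outs area at
 * offset 16 above before the register loads; LoadArgRegs then places the
 * low half in kArg3.
 */
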
/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args 3+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments.  Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.low_reg, loc.high_reg);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
      next_arg++;
    }
  }

  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int outs_offset = 4 /* Method* */ + (3 * 4);
  if (cu_->instruction_set != kThumb2) {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  } else {
    if (info->num_arg_words >= 20) {
      // Generate memcpy
      OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
      OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
      CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                                 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
    } else {
      // Use vldm/vstm pair using kArg3 as a temp
      int regs_left = std::min(info->num_arg_words - 3, 16);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
      LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
      // TUNING: loosen barrier
      ld->def_mask = ENCODE_ALL;
      SetMemRefType(ld, true /* is_load */, kDalvikReg);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      LIR* st = OpVstm(TargetReg(kArg3), regs_left);
      SetMemRefType(st, false /* is_load */, kDalvikReg);
      st->def_mask = ENCODE_ALL;
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}

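/*
 * Strategy example (a sketch): a 10-word Thumb2 call copies 7 words with one
 * vldm/vstm pair through kArg3; a 25-word call (or any call on other targets)
 * falls back to the pMemcpy helper for the outs area.
 */
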
RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturn(false);
  } else {
    res = info->result;
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturnWide(false);
  } else {
    res = info->result;
  }
  return res;
}

bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

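  // Net address computed below (a sketch): the returned char is
  //   *(uint16_t*)(value_ + data_offset + ((offset_ + index) << 1))
  // because LoadBaseIndexed applies scale 1 (x2) to the halfword index.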
  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  int reg_max;
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondCs, launch_pad);
    }
  } else {
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondCc, launch_pad);
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
  }
  OpRegImm(kOpAdd, reg_ptr, data_offset);
  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
  FreeTemp(rl_obj.low_reg);
  FreeTemp(rl_idx.low_reg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}

// Generates an inlined String.isEmpty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
  if (is_empty) {
    // dst = (dst == 0);
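    // Branch-free zero tests follow (a sketch of why they work): on Thumb2,
    // negating subtracts dst from zero and sets carry only when there is no
    // borrow, i.e. only when dst == 0, so the adc computes
    // dst + (-dst) + carry == (dst == 0).  On x86, (dst - 1) >> 31 with a
    // logical shift is 1 only when the (non-negative) length was 0.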
    if (cu_->instruction_set == kThumb2) {
      int t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
      OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      OpRegImm(kOpSub, rl_result.low_reg, 1);
      OpRegImm(kOpLsr, rl_result.low_reg, 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int sign_reg = AllocTemp();
  // abs(x): y = x >> 31 (arithmetic); abs = (x + y) ^ y.
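  // e.g. x = -5: y = 0xFFFFFFFF (-1), x + y = -6, and -6 ^ -1 == 5.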
  OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
  OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // abs(x): y = x >> 63 (sign replicated into both words); abs = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
    FreeTemp(rl_src.low_reg);
    FreeTemp(rl_src.high_reg);
    int sign_reg = AllocTemp();
    // abs(x): y = x >> 63 (sign replicated into both words); abs = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}

bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

/*
 * Fast String.indexOf(I) & (II).  Tests for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);
  int reg_char = TargetReg(kArg1);
  int reg_start = TargetReg(kArg2);

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start = info->args[2];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    LoadValueDirectFixed(rl_start, reg_start);
  }
  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
  }
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

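/*
 * Launch-pad protocol used by the intrinsics in this file (a sketch):
 * kPseudoIntrinsicRetry records where to bail to the full method call, and
 * operands[2] holds the label to resume at afterwards, or 0 when the slow
 * path cannot return here.
 */
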
/* Fast String.compareTo(Ljava/lang/String;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int offset = Thread::PeerOffset().Int32Value();
  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
    LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
  } else {
    CHECK(cu_->instruction_set == kX86);
    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = InlineTarget(info);  // result reg
  if (is_volatile) {
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_long) {
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kX86 && is_object) {
    // TODO: fix X86, it exhausts registers for card marking.
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
  }
  if (is_volatile) {
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
  }
  return true;
}

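/*
 * Barrier pairing in the two routines above (a sketch): a volatile get
 * issues kLoadLoad ahead of the load; a volatile put brackets the store
 * with kStoreStore before and kStoreLoad after; an ordered put emits only
 * the leading kStoreStore.
 */
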
1188bool Mir2Lir::GenIntrinsic(CallInfo* info) {
1189  if (info->opt_flags & MIR_INLINED) {
1190    return false;
1191  }
1192  /*
1193   * TODO: move these to a target-specific structured constant array
1194   * and use a generic match function.  The list of intrinsics may be
1195   * slightly different depending on target.
1196   * TODO: Fold this into a matching function that runs during
1197   * basic block building.  This should be part of the action for
1198   * small method inlining and recognition of the special object init
1199   * method.  By doing this during basic block construction, we can also
1200   * take advantage of/generate new useful dataflow info.
1201   */
1202  StringPiece tgt_methods_declaring_class(
1203      cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
1204  if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
1205    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1206    if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
1207      return GenInlinedDoubleCvt(info);
1208    }
1209    if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
1210      return GenInlinedDoubleCvt(info);
1211    }
1212  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) {
1213    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1214    if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
1215      return GenInlinedFloatCvt(info);
1216    }
1217    if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
1218      return GenInlinedFloatCvt(info);
1219    }
1220  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
1221             tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
1222    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1223    if (tgt_method == "int java.lang.Math.abs(int)" ||
1224        tgt_method == "int java.lang.StrictMath.abs(int)") {
1225      return GenInlinedAbsInt(info);
1226    }
1227    if (tgt_method == "long java.lang.Math.abs(long)" ||
1228        tgt_method == "long java.lang.StrictMath.abs(long)") {
1229      return GenInlinedAbsLong(info);
1230    }
1231    if (tgt_method == "int java.lang.Math.max(int, int)" ||
1232        tgt_method == "int java.lang.StrictMath.max(int, int)") {
1233      return GenInlinedMinMaxInt(info, false /* is_min */);
1234    }
1235    if (tgt_method == "int java.lang.Math.min(int, int)" ||
1236        tgt_method == "int java.lang.StrictMath.min(int, int)") {
1237      return GenInlinedMinMaxInt(info, true /* is_min */);
1238    }
1239    if (tgt_method == "double java.lang.Math.sqrt(double)" ||
1240        tgt_method == "double java.lang.StrictMath.sqrt(double)") {
1241      return GenInlinedSqrt(info);
1242    }
1243  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
1244    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1245    if (tgt_method == "char java.lang.String.charAt(int)") {
1246      return GenInlinedCharAt(info);
1247    }
1248    if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
1249      return GenInlinedStringCompareTo(info);
1250    }
1251    if (tgt_method == "boolean java.lang.String.is_empty()") {
1252      return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */);
1253    }
1254    if (tgt_method == "int java.lang.String.index_of(int, int)") {
1255      return GenInlinedIndexOf(info, false /* base 0 */);
1256    }
1257    if (tgt_method == "int java.lang.String.index_of(int)") {
1258      return GenInlinedIndexOf(info, true /* base 0 */);
1259    }
1260    if (tgt_method == "int java.lang.String.length()") {
1261      return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
1262    }
1263  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) {
1264    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1265    if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
1266      return GenInlinedCurrentThread(info);
1267    }
1268  } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) {
1269    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1270    if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
1271      return GenInlinedCas32(info, false);
1272    }
1273    if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
1274      return GenInlinedCas32(info, true);
1275    }
    if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
    }
    if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                 false /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                 true /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                 false /* is_volatile */, true /* is_ordered */);
    }
    if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */);
    }
    if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                 false /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                 true /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                 false /* is_volatile */, true /* is_ordered */);
    }
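    // The object variants below reuse the 32-bit int get path (references are
    // 32 bits wide on the targets handled here); the object puts additionally
    // set is_object so the store receives write-barrier handling.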
    if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
    }
    if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                 false /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                 true /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                 false /* is_volatile */, true /* is_ordered */);
    }
  }
  return false;
}

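/*
 * Generate code for a general invoke.  Intrinsics are tried first; failing
 * that, all values are flushed to their home locations, dispatch information
 * is computed, arguments are loaded interleaved with the call-sequence state
 * machine, the call is emitted, and any pending MOVE_RESULT is stored.
 */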
void Mir2Lir::GenInvoke(CallInfo* info) {
  if (GenIntrinsic(info)) {
    return;
  }
  InvokeType original_type = info->type;  // Save now; ComputeInvokeInfo below may mutate info->type.
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = NULL;
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
  MethodReference target_method(cUnit->GetDexFile(), info->index);
  int vtable_idx;
  uintptr_t direct_code;
  uintptr_t direct_method;
  bool skip_this;
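  // Ask the driver to resolve dispatch details (vtable index, direct code or
  // method pointers) for this call site; success enables the fast paths below.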
  bool fast_path =
      cu_->compiler_driver->ComputeInvokeInfo(cUnit, current_dalvik_offset_,
                                              info->type, target_method,
                                              vtable_idx,
                                              direct_code, direct_method,
                                              true) && !SLOW_INVOKE_PATH;
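  // Choose the state machine that drives this invoke type's call sequence.
  // skip_this is set when that sequence loads the receiver itself, so the
  // argument loads below must not load it a second time.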
  if (info->type == kInterface) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = false;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method,
                                      vtable_idx, direct_code, direct_method,
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, vtable_idx,
                                    direct_code, direct_method, original_type,
                                    skip_this);
  }
  // Finish up any of the call sequence not interleaved in arg loading
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method,
                                vtable_idx, direct_code, direct_method,
                                original_type);
  }
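  // Emit the call itself.  Non-x86 targets branch through the invoke target
  // register loaded by the call sequence; x86 can call through memory.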
  LIR* call_inst;
  if (cu_->instruction_set != kX86) {
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    if (fast_path && info->type != kInterface) {
      call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                        mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
    } else {
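      // Slow path, or an interface call: route through the matching runtime
      // trampoline, reached thread-relative via OpThreadMem.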
      int trampoline = 0;
      switch (info->type) {
      case kInterface:
        trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
            : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
        break;
      case kDirect:
        trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
        break;
      case kStatic:
        trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
        break;
      case kSuper:
        trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
        break;
      case kVirtual:
        trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
        break;
      default:
        LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);

  ClobberCalleeSave();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}

}  // namespace art