gen_invoke.cc revision 0b9203e7996ee1856f620f95d95d8a273c43a3df
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mir_to_lir-inl.h"
18
19#include "arm/codegen_arm.h"
20#include "dex/compiler_ir.h"
21#include "dex/dex_flags.h"
22#include "dex/mir_graph.h"
23#include "dex/quick/dex_file_method_inliner.h"
24#include "dex/quick/dex_file_to_method_inliner_map.h"
25#include "dex_file-inl.h"
26#include "driver/compiler_driver.h"
27#include "entrypoints/quick/quick_entrypoints.h"
28#include "invoke_type.h"
29#include "mirror/array.h"
30#include "mirror/class-inl.h"
31#include "mirror/dex_cache.h"
32#include "mirror/object_array-inl.h"
33#include "mirror/string.h"
34#include "scoped_thread_state_change.h"
35
36namespace art {
37
38// Shortcuts to repeatedly used long types.
39typedef mirror::ObjectArray<mirror::Object> ObjArray;
40
41/*
42 * This source file contains "gen" codegen routines that should
43 * be applicable to most targets.  Only mid-level support utilities
44 * and "op" calls may be used here.
45 */
46
47void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
48  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
49   public:
50    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
51        : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) {
52    }
53
54    void Compile() {
55      m2l_->ResetRegPool();
56      m2l_->ResetDefTracking();
57      GenerateTargetLabel(kPseudoIntrinsicRetry);
58      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
59      m2l_->GenInvokeNoInline(info_);
60      if (cont_ != nullptr) {
61        m2l_->OpUnconditionalBranch(cont_);
62      }
63    }
64
65   private:
66    CallInfo* const info_;
67  };
68
69  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
70}
71
72/*
73 * To save scheduling time, helper calls are broken into two parts: generation of
74 * the helper target address, and the actual call to the helper.  Because x86
75 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
76 * load arguments between the two parts.
77 */
78// template <size_t pointer_size>
79RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
80  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
81    return RegStorage::InvalidReg();
82  } else {
83    return LoadHelper(trampoline);
84  }
85}
86
87LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
88                         bool use_link) {
89  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);
90
91  if (r_tgt.Valid()) {
92    FreeTemp(r_tgt);
93  }
94
95  if (safepoint_pc) {
96    MarkSafepointPC(call_inst);
97  }
98  return call_inst;
99}
100
101void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
102  RegStorage r_tgt = CallHelperSetup(trampoline);
103  ClobberCallerSave();
104  CallHelper(r_tgt, trampoline, safepoint_pc);
105}
106
107void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
108  RegStorage r_tgt = CallHelperSetup(trampoline);
109  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
110  ClobberCallerSave();
111  CallHelper(r_tgt, trampoline, safepoint_pc);
112}
113
114void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
115                                   bool safepoint_pc) {
116  RegStorage r_tgt = CallHelperSetup(trampoline);
117  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
118  ClobberCallerSave();
119  CallHelper(r_tgt, trampoline, safepoint_pc);
120}
121
122void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
123                                           bool safepoint_pc) {
124  RegStorage r_tgt = CallHelperSetup(trampoline);
125  if (arg0.wide == 0) {
126    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
127  } else {
128    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
129  }
130  ClobberCallerSave();
131  CallHelper(r_tgt, trampoline, safepoint_pc);
132}
133
134void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
135                                      bool safepoint_pc) {
136  RegStorage r_tgt = CallHelperSetup(trampoline);
137  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
138  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
139  ClobberCallerSave();
140  CallHelper(r_tgt, trampoline, safepoint_pc);
141}
142
143void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
144                                              RegLocation arg1, bool safepoint_pc) {
145  RegStorage r_tgt = CallHelperSetup(trampoline);
146  if (arg1.wide == 0) {
147    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
148  } else {
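    // On MIPS (O32) a 64-bit value is passed in an even/odd register pair, so the wide
    // argument is placed starting at kArg2 here rather than at kArg1.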
149    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
150    LoadValueDirectWideFixed(arg1, r_tmp);
151  }
152  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
153  ClobberCallerSave();
154  CallHelper(r_tgt, trampoline, safepoint_pc);
155}
156
157void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
158                                              int arg1, bool safepoint_pc) {
159  RegStorage r_tgt = CallHelperSetup(trampoline);
160  DCHECK(!arg0.wide);
161  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
162  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
163  ClobberCallerSave();
164  CallHelper(r_tgt, trampoline, safepoint_pc);
165}
166
167void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
168                                      bool safepoint_pc) {
169  RegStorage r_tgt = CallHelperSetup(trampoline);
170  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
171  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
172  ClobberCallerSave();
173  CallHelper(r_tgt, trampoline, safepoint_pc);
174}
175
176void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
177                                      bool safepoint_pc) {
178  RegStorage r_tgt = CallHelperSetup(trampoline);
179  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
180  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
181  ClobberCallerSave();
182  CallHelper(r_tgt, trampoline, safepoint_pc);
183}
184
185void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
186                                         bool safepoint_pc) {
187  RegStorage r_tgt = CallHelperSetup(trampoline);
188  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
189  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
190  ClobberCallerSave();
191  CallHelper(r_tgt, trampoline, safepoint_pc);
192}
193
194void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
195                                         bool safepoint_pc) {
196  RegStorage r_tgt = CallHelperSetup(trampoline);
197  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
198  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
199  if (r_tmp.NotExactlyEquals(arg0)) {
200    OpRegCopy(r_tmp, arg0);
201  }
202  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
203  ClobberCallerSave();
204  CallHelper(r_tgt, trampoline, safepoint_pc);
205}
206
207void Mir2Lir::CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
208                                                    RegLocation arg1, bool safepoint_pc) {
209  RegStorage r_tgt = CallHelperSetup(trampoline);
210  DCHECK(!IsSameReg(TargetReg(kArg2, arg0.GetWideKind()), arg0));
211  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
212  if (r_tmp.NotExactlyEquals(arg0)) {
213    OpRegCopy(r_tmp, arg0);
214  }
215  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
216  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
217  ClobberCallerSave();
218  CallHelper(r_tgt, trampoline, safepoint_pc);
219}
220
221void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
222                                                      RegLocation arg0, RegLocation arg1,
223                                                      bool safepoint_pc) {
224  RegStorage r_tgt = CallHelperSetup(trampoline);
225  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
226    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
227
228    RegStorage arg1_reg;
229    if (arg1.fp == arg0.fp) {
230      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
231    } else {
232      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
233    }
234
235    if (arg0.wide == 0) {
236      LoadValueDirectFixed(arg0, arg0_reg);
237    } else {
238      LoadValueDirectWideFixed(arg0, arg0_reg);
239    }
240
241    if (arg1.wide == 0) {
242      LoadValueDirectFixed(arg1, arg1_reg);
243    } else {
244      LoadValueDirectWideFixed(arg1, arg1_reg);
245    }
246  } else {
247    DCHECK(!cu_->target64);
248    if (arg0.wide == 0) {
249      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
250      if (arg1.wide == 0) {
251        if (cu_->instruction_set == kMips) {
252          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
253        } else {
254          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
255        }
256      } else {
257        if (cu_->instruction_set == kMips) {
258          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
259        } else {
260          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
261        }
262      }
263    } else {
264      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
265      if (arg1.wide == 0) {
266        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
267      } else {
268        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
269      }
270    }
271  }
272  ClobberCallerSave();
273  CallHelper(r_tgt, trampoline, safepoint_pc);
274}
275
276void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
277  WideKind arg0_kind = arg0.GetWideKind();
278  WideKind arg1_kind = arg1.GetWideKind();
279  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
280    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
281      // Swap kArg0 and kArg1 with kArg2 as temp.
282      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
283      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
284      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
285    } else {
286      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
287      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
288    }
289  } else {
290    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
291    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
292  }
293}
294
295void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
296                                      RegStorage arg1, bool safepoint_pc) {
297  RegStorage r_tgt = CallHelperSetup(trampoline);
298  CopyToArgumentRegs(arg0, arg1);
299  ClobberCallerSave();
300  CallHelper(r_tgt, trampoline, safepoint_pc);
301}
302
303void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
304                                         RegStorage arg1, int arg2, bool safepoint_pc) {
305  RegStorage r_tgt = CallHelperSetup(trampoline);
306  CopyToArgumentRegs(arg0, arg1);
307  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
308  ClobberCallerSave();
309  CallHelper(r_tgt, trampoline, safepoint_pc);
310}
311
312void Mir2Lir::CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
313                                                    RegLocation arg1, bool safepoint_pc) {
314  RegStorage r_tgt = CallHelperSetup(trampoline);
315  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
316  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
317  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
318  ClobberCallerSave();
319  CallHelper(r_tgt, trampoline, safepoint_pc);
320}
321
322void Mir2Lir::CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
323                                            bool safepoint_pc) {
324  RegStorage r_tgt = CallHelperSetup(trampoline);
325  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
326  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
327  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
328  ClobberCallerSave();
329  CallHelper(r_tgt, trampoline, safepoint_pc);
330}
331
332void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
333                                                         RegLocation arg1,
334                                                         RegLocation arg2, bool safepoint_pc) {
335  RegStorage r_tgt = CallHelperSetup(trampoline);
336  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
337                                                        // instantiation bug in GCC.
338  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
339  if (arg2.wide == 0) {
340    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
341  } else {
342    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
343  }
344  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
345  ClobberCallerSave();
346  CallHelper(r_tgt, trampoline, safepoint_pc);
347}
348
349void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
350    QuickEntrypointEnum trampoline,
351    RegLocation arg0,
352    RegLocation arg1,
353    RegLocation arg2,
354    bool safepoint_pc) {
355  RegStorage r_tgt = CallHelperSetup(trampoline);
356  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
357  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
358  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
359  ClobberCallerSave();
360  CallHelper(r_tgt, trampoline, safepoint_pc);
361}
362
363/*
364 * If there are any ins passed in registers that have not been promoted
365 * to a callee-save register, flush them to the frame.  Perform initial
366 * assignment of promoted arguments.
367 *
368 * ArgLocs is an array of location records describing the incoming arguments
369 * with one location record per word of argument.
370 */
371// TODO: Support 64-bit argument registers.
372void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
373  /*
374   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
375   * It will attempt to keep kArg0 live (or copy it to home location
376   * if promoted).
377   */
378  RegLocation rl_src = rl_method;
379  rl_src.location = kLocPhysReg;
380  rl_src.reg = TargetReg(kArg0, kRef);
381  rl_src.home = false;
382  MarkLive(rl_src);
383  StoreValue(rl_method, rl_src);
384  // If Method* has been promoted, explicitly flush
385  if (rl_method.location == kLocPhysReg) {
386    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
387  }
388
389  if (mir_graph_->GetNumOfInVRs() == 0) {
390    return;
391  }
392
393  int start_vreg = mir_graph_->GetFirstInVR();
394  /*
395   * Copy incoming arguments to their proper home locations.
396   * NOTE: an older version of dx had an issue in which
397   * it would reuse static method argument registers.
398   * This could result in the same Dalvik virtual register
399   * being promoted to both core and fp regs. To account for this,
400   * we only copy to the corresponding promoted physical register
401   * if it matches the type of the SSA name for the incoming
402   * argument.  It is also possible that long and double arguments
403   * end up half-promoted.  In those cases, we must flush the promoted
404   * half to memory as well.
405   */
406  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
407  RegLocation* t_loc = nullptr;
408  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
409    // get reg corresponding to input
410    RegStorage reg = GetArgMappingToPhysicalReg(i);
411    t_loc = &ArgLocs[i];
412
413    // If the wide input appeared only in a single (32-bit) register, ignore the register
414    // and proceed as if the value comes from memory.
415    if (t_loc->wide && reg.Valid() && !reg.Is64Bit()) {
416      // The memory already holds the half. Don't do anything.
417      reg = RegStorage::InvalidReg();
418    }
419
420    if (reg.Valid()) {
421      // If arriving in register.
422
423      // We have already updated the arg location with promoted info
424    // so we can rely on it here.
425      if (t_loc->location == kLocPhysReg) {
426        // Just copy it.
427        if (t_loc->wide) {
428          OpRegCopyWide(t_loc->reg, reg);
429        } else {
430          OpRegCopy(t_loc->reg, reg);
431        }
432      } else {
433        // Needs flush.
434        int offset = SRegOffset(start_vreg + i);
435        if (t_loc->ref) {
436          StoreRefDisp(TargetPtrReg(kSp), offset, reg, kNotVolatile);
437        } else {
438          StoreBaseDisp(TargetPtrReg(kSp), offset, reg, t_loc->wide ? k64 : k32, kNotVolatile);
439        }
440      }
441    } else {
442      // If arriving in frame & promoted.
443      if (t_loc->location == kLocPhysReg) {
444        int offset = SRegOffset(start_vreg + i);
445        if (t_loc->ref) {
446          LoadRefDisp(TargetPtrReg(kSp), offset, t_loc->reg, kNotVolatile);
447        } else {
448          LoadBaseDisp(TargetPtrReg(kSp), offset, t_loc->reg, t_loc->wide ? k64 : k32,
449                       kNotVolatile);
450        }
451      }
452    }
453  }
454}
455
456static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
457  RegLocation rl_arg = info->args[0];
458  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
459}
460
461static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
462  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
463  // get this->klass_ [use kArg1, set kArg0]
464  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
465                  cg->TargetReg(kArg0, kRef),
466                  kNotVolatile);
467  cg->MarkPossibleNullPointerException(info->opt_flags);
468}
469
470static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
471                                                       const CompilationUnit* cu, Mir2Lir* cg) {
472  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
473    int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
474        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
475    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
476    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
477                     cg->TargetPtrReg(kInvokeTgt));
478    return true;
479  }
480  return false;
481}
482
483/*
484 * Bit of a hack here - in the absence of a real scheduling pass,
485 * emit the next instruction in a virtual invoke sequence.
486 * We can use kLr as a temp prior to target address loading.
487 * Note also that we'll load the first argument ("this") into
488 * kArg1 here rather than the standard GenDalvikArgs.
489 */
490static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
491                         int state, const MethodReference& target_method,
492                         uint32_t method_idx, uintptr_t, uintptr_t,
493                         InvokeType) {
494  UNUSED(target_method);
495  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
496  /*
497   * This is the fast path in which the target virtual method is
498   * fully resolved at compile time.
499   */
500  switch (state) {
501    case 0:
502      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
503      break;
504    case 1:
505      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
506                                                  // Includes a null-check.
507      break;
508    case 2: {
509      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
510      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
511          method_idx * sizeof(mirror::Class::VTableEntry);
512      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
513      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
514      break;
515    }
516    case 3:
517      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
518        break;                                    // kInvokeTgt := kArg0->entrypoint
519      }
520      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
521      FALLTHROUGH_INTENDED;
522    default:
523      return -1;
524  }
525  return state + 1;
526}
527
528/*
529 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
530 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
531 * more than one interface method maps to the same index. Note also that we'll load the first
532 * argument ("this") into kArg1 here rather than the standard GenDalvikArgs.
533 */
534static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
535                                 const MethodReference& target_method,
536                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
537  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
538
539  switch (state) {
540    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
541      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
542      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
543      if (cu->instruction_set == kX86) {
544        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
545      }
546      break;
547    case 1:
548      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
549      break;
550    case 2:
551      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
552                                                  // Includes a null-check.
553      break;
554    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
555      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
556          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
557      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
558      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
559      break;
560    }
561    case 4:
562      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
563        break;                                    // kInvokeTgt := kArg0->entrypoint
564      }
565      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
566      FALLTHROUGH_INTENDED;
567    default:
568      return -1;
569  }
570  return state + 1;
571}
572
573static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
574                            QuickEntrypointEnum trampoline, int state,
575                            const MethodReference& target_method, uint32_t method_idx) {
576  UNUSED(info, method_idx);
577  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
578
579  /*
580   * This handles the case in which the base method is not fully
581   * resolved at compile time; in that case we bail to a runtime helper.
582   */
583  if (state == 0) {
584    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
585      // Load trampoline target
586      int32_t disp;
587      if (cu->target64) {
588        disp = GetThreadOffset<8>(trampoline).Int32Value();
589      } else {
590        disp = GetThreadOffset<4>(trampoline).Int32Value();
591      }
592      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
593    }
594    // Load kArg0 with method index
595    CHECK_EQ(cu->dex_file, target_method.dex_file);
596    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
597    return 1;
598  }
599  return -1;
600}
601
602static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
603                                int state,
604                                const MethodReference& target_method,
605                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
606  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
607                          target_method, 0);
608}
609
610static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
611                                const MethodReference& target_method,
612                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
613  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
614                          target_method, 0);
615}
616
617static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
618                               const MethodReference& target_method,
619                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
620  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
621                          target_method, 0);
622}
623
624static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
625                           const MethodReference& target_method,
626                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
627  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
628                          target_method, 0);
629}
630
631static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
632                                                CallInfo* info, int state,
633                                                const MethodReference& target_method,
634                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
635  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
636                          target_method, 0);
637}
638
639// Default implementation of implicit null pointer check.
640// Overridden by arch-specific implementations as necessary.
641void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
642  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
643    return;
644  }
645  RegStorage tmp = AllocTemp();
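  // A load from offset 0 faults if 'reg' is null; the fault handler turns this into a
  // NullPointerException, so no explicit compare-and-branch is emitted.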
646  Load32Disp(reg, 0, tmp);
647  MarkPossibleNullPointerException(opt_flags);
648  FreeTemp(tmp);
649}
650
651/**
652 * @brief Used to flush promoted registers if they are used as arguments
653 * in an invocation.
654 * @param info the information about the arguments of the invocation.
655 * @param start the first argument to start looking from.
656 */
657void Mir2Lir::GenDalvikArgsFlushPromoted(CallInfo* info, int start) {
658  if (cu_->disable_opt & (1 << kPromoteRegs)) {
659    // This makes sense only if promotion is enabled.
660    return;
661  }
662  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
663  // Scan the rest of the args - if in phys_reg flush to memory
664  for (int next_arg = start; next_arg < info->num_arg_words;) {
665    RegLocation loc = info->args[next_arg];
666    if (loc.wide) {
667      loc = UpdateLocWide(loc);
668      if (loc.location == kLocPhysReg) {
669        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
670      }
671      next_arg += 2;
672    } else {
673      loc = UpdateLoc(loc);
674      if (loc.location == kLocPhysReg) {
675        if (loc.ref) {
676          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
677        } else {
678          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
679                        kNotVolatile);
680        }
681      }
682      next_arg++;
683    }
684  }
685}
686
687/**
688 * @brief Used to optimize the copying of VRs which are arguments of an invocation.
689 * Note that promoted registers should be flushed first if any copying is done.
690 * An implementation that does the copying may skip several of the leading VRs, but it must
691 * copy the rest through to the end and must return the number of VRs it skipped
692 * (which may be all of them).
693 * @see GenDalvikArgsFlushPromoted
694 * @param info the information about the arguments of the invocation.
695 * @param first the first argument to start looking from.
696 * @param count the number of remaining arguments we can handle.
697 * @return the number of arguments that were not handled. Unhandled arguments
698 * must be the leading ones, starting from the first.
699 */
700int Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
701  // A runtime memcpy call is fairly expensive, so only use it when count is big.
702  if (count > 16) {
703    GenDalvikArgsFlushPromoted(info, first);
704    int start_offset = SRegOffset(info->args[first].s_reg_low);
705    int outs_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);
706
707    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
708    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
709    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
710                               count * 4, false);
711    count = 0;
712  }
713  return count;
714}
715
716int Mir2Lir::GenDalvikArgs(CallInfo* info, int call_state,
717                           LIR** pcrLabel, NextCallInsn next_call_insn,
718                           const MethodReference& target_method,
719                           uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
720                           InvokeType type, bool skip_this) {
721  // If no arguments, just return.
722  if (info->num_arg_words == 0)
723    return call_state;
724
725  const int start_index = skip_this ? 1 : 0;
726
727  // Get the architecture-dependent mapping between output VRs and physical registers,
728  // based on the shorty of the method to call.
729  InToRegStorageMapping in_to_reg_storage_mapping(arena_);
730  {
731    const char* target_shorty = mir_graph_->GetShortyFromMethodReference(target_method);
732    ShortyIterator shorty_iterator(target_shorty, type == kStatic);
733    in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
734  }
735
736  int stack_map_start = std::max(in_to_reg_storage_mapping.GetMaxMappedIn() + 1, start_index);
737  if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
738    // It is possible that the last mapped reg is 32-bit while the arg is 64-bit.
739    // In that case the high word is handled together with the low part mapped to the register.
740    stack_map_start++;
741  }
742  int regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;
743
744  // If it is a range case we can try to copy remaining VRs (not mapped to physical registers)
745  // using a more optimal algorithm.
746  if (info->is_range && regs_left_to_pass_via_stack > 1) {
747    regs_left_to_pass_via_stack = GenDalvikArgsBulkCopy(info, stack_map_start,
748                                                        regs_left_to_pass_via_stack);
749  }
750
751  // Now handle any remaining VRs mapped to stack.
752  if (in_to_reg_storage_mapping.HasArgumentsOnStack()) {
753    // Use two temps, but not kArg1: it may hold 'this', which we might be able to skip.
754    // Keep separate temps for single and wide values - it can give some advantage.
755    RegStorage regRef = TargetReg(kArg3, kRef);
756    RegStorage regSingle = TargetReg(kArg3, kNotWide);
757    RegStorage regWide = TargetReg(kArg2, kWide);
758    for (int i = start_index;
759         i < stack_map_start + regs_left_to_pass_via_stack; i++) {
760      RegLocation rl_arg = info->args[i];
761      rl_arg = UpdateRawLoc(rl_arg);
762      RegStorage reg = in_to_reg_storage_mapping.Get(i);
763      if (!reg.Valid()) {
764        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
765        {
766          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
767          if (rl_arg.wide) {
768            if (rl_arg.location == kLocPhysReg) {
769              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
770            } else {
771              LoadValueDirectWideFixed(rl_arg, regWide);
772              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
773            }
774          } else {
775            if (rl_arg.location == kLocPhysReg) {
776              if (rl_arg.ref) {
777                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
778              } else {
779                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
780              }
781            } else {
782              if (rl_arg.ref) {
783                LoadValueDirectFixed(rl_arg, regRef);
784                StoreRefDisp(TargetPtrReg(kSp), out_offset, regRef, kNotVolatile);
785              } else {
786                LoadValueDirectFixed(rl_arg, regSingle);
787                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
788              }
789            }
790          }
791        }
792        call_state = next_call_insn(cu_, info, call_state, target_method,
793                                    vtable_idx, direct_code, direct_method, type);
794      }
795      if (rl_arg.wide) {
796        i++;
797      }
798    }
799  }
800
801  // Finish with VRs mapped to physical registers.
802  for (int i = start_index; i < stack_map_start; i++) {
803    RegLocation rl_arg = info->args[i];
804    rl_arg = UpdateRawLoc(rl_arg);
805    RegStorage reg = in_to_reg_storage_mapping.Get(i);
806    if (reg.Valid()) {
807      if (rl_arg.wide) {
808        // If reg is not 64-bit (i.e. it holds only half of the 64-bit value), handle it separately.
809        if (!reg.Is64Bit()) {
810          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
811          if (rl_arg.location == kLocPhysReg) {
812            int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
813            // Dump it to memory.
814            StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
815            LoadBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
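            // Only the low half travels in the 32-bit register; the high half stays in the
            // out area on the stack.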
816          } else {
817            int high_offset = StackVisitor::GetOutVROffset(i + 1, cu_->instruction_set);
818            // First, use target reg for high part.
819            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + 1), reg, k32,
820                         kNotVolatile);
821            StoreBaseDisp(TargetPtrReg(kSp), high_offset, reg, k32, kNotVolatile);
822            // Now, use target reg for low part.
823            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low), reg, k32, kNotVolatile);
824            int low_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
825            // And store it to the expected memory location.
826            StoreBaseDisp(TargetPtrReg(kSp), low_offset, reg, k32, kNotVolatile);
827          }
828        } else {
829          LoadValueDirectWideFixed(rl_arg, reg);
830        }
831      } else {
832        LoadValueDirectFixed(rl_arg, reg);
833      }
834      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
835                               direct_code, direct_method, type);
836    }
837    if (rl_arg.wide) {
838      i++;
839    }
840  }
841
842  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
843                           direct_code, direct_method, type);
844  if (pcrLabel) {
845    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
846      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
847    } else {
848      *pcrLabel = nullptr;
849      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
850    }
851  }
852  return call_state;
853}
854
855RegStorage Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
856  if (!in_to_reg_storage_mapping_.IsInitialized()) {
857    ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
858    in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
859  }
860  return in_to_reg_storage_mapping_.Get(arg_num);
861}
862
863RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
864  RegLocation res;
865  if (info->result.location == kLocInvalid) {
866    // If result is unused, return a sink target based on type of invoke target.
867    res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
868  } else {
869    res = info->result;
870    DCHECK_EQ(LocToRegClass(res),
871              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
872  }
873  return res;
874}
875
876RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
877  RegLocation res;
878  if (info->result.location == kLocInvalid) {
879    // If result is unused, return a sink target based on type of invoke target.
880    res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
881  } else {
882    res = info->result;
883    DCHECK_EQ(LocToRegClass(res),
884              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
885  }
886  return res;
887}
888
889bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
890  if (cu_->instruction_set == kMips) {
891    // TODO - add Mips implementation
892    return false;
893  }
894
895  bool use_direct_type_ptr;
896  uintptr_t direct_type_ptr;
897  ClassReference ref;
898  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
899        &use_direct_type_ptr, &direct_type_ptr)) {
900    return false;
901  }
902
903  RegStorage reg_class = TargetReg(kArg1, kRef);
904  Clobber(reg_class);
905  LockTemp(reg_class);
906  if (use_direct_type_ptr) {
907    LoadConstant(reg_class, direct_type_ptr);
908  } else {
909    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
910    LoadClassType(*ref.first, type_idx, kArg1);
911  }
912
913  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
914  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
915  CHECK(slow_path_flag_offset && disable_flag_offset &&
916        (slow_path_flag_offset != disable_flag_offset));
917
918  // intrinsic logic start.
919  RegLocation rl_obj = info->args[0];
920  rl_obj = LoadValue(rl_obj, kRefReg);
921
922  RegStorage reg_slow_path = AllocTemp();
923  RegStorage reg_disabled = AllocTemp();
924  LoadBaseDisp(reg_class, slow_path_flag_offset, reg_slow_path, kSignedByte, kNotVolatile);
925  LoadBaseDisp(reg_class, disable_flag_offset, reg_disabled, kSignedByte, kNotVolatile);
926  FreeTemp(reg_class);
927  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
928  FreeTemp(reg_disabled);
929
930  // if slow path, jump to JNI path target
931  LIR* slow_path_branch;
932  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
933    // Generate only a conditional branch, as the OR sets the condition codes (we are interested in the 'Z' flag).
934    slow_path_branch = OpCondBranch(kCondNe, nullptr);
935  } else {
936    // Generate compare and branch.
937    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
938  }
939  FreeTemp(reg_slow_path);
940
941  // slow path not enabled, simply load the referent of the reference object
942  RegLocation rl_dest = InlineTarget(info);
943  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
944  GenNullCheck(rl_obj.reg, info->opt_flags);
945  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
946      kNotVolatile);
947  MarkPossibleNullPointerException(info->opt_flags);
948  StoreValue(rl_dest, rl_result);
949
950  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
951  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
952  ClobberCallerSave();  // We must clobber everything because slow path will return here
953  return true;
954}
955
956bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
957  // Location of reference to data array
958  int value_offset = mirror::String::ValueOffset().Int32Value();
959  // Location of count
960  int count_offset = mirror::String::CountOffset().Int32Value();
961  // Starting offset within data array
962  int offset_offset = mirror::String::OffsetOffset().Int32Value();
963  // Start of char data within array_
964  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
965
966  RegLocation rl_obj = info->args[0];
967  RegLocation rl_idx = info->args[1];
968  rl_obj = LoadValue(rl_obj, kRefReg);
969  rl_idx = LoadValue(rl_idx, kCoreReg);
970  RegStorage reg_max;
971  GenNullCheck(rl_obj.reg, info->opt_flags);
972  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
973  LIR* range_check_branch = nullptr;
974  RegStorage reg_off;
975  RegStorage reg_ptr;
976  reg_off = AllocTemp();
977  reg_ptr = AllocTempRef();
978  if (range_check) {
979    reg_max = AllocTemp();
980    Load32Disp(rl_obj.reg, count_offset, reg_max);
981    MarkPossibleNullPointerException(info->opt_flags);
982  }
983  Load32Disp(rl_obj.reg, offset_offset, reg_off);
984  MarkPossibleNullPointerException(info->opt_flags);
985  LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
986  if (range_check) {
987    // Set up a slow path to allow retry in case of bounds violation.
988    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
989    FreeTemp(reg_max);
990    range_check_branch = OpCondBranch(kCondUge, nullptr);
991  }
992  OpRegImm(kOpAdd, reg_ptr, data_offset);
993  if (rl_idx.is_const) {
994    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
995  } else {
996    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
997  }
998  FreeTemp(rl_obj.reg);
999  if (rl_idx.location == kLocPhysReg) {
1000    FreeTemp(rl_idx.reg);
1001  }
1002  RegLocation rl_dest = InlineTarget(info);
1003  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1004  LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1005  FreeTemp(reg_off);
1006  FreeTemp(reg_ptr);
1007  StoreValue(rl_dest, rl_result);
1008  if (range_check) {
1009    DCHECK(range_check_branch != nullptr);
1010    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1011    AddIntrinsicSlowPath(info, range_check_branch);
1012  }
1013  return true;
1014}
1015
1016// Generates an inlined String.isEmpty() or String.length().
1017bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1018  if (cu_->instruction_set == kMips) {
1019    // TODO - add Mips implementation
1020    return false;
1021  }
1022  // dst = src.length();
1023  RegLocation rl_obj = info->args[0];
1024  rl_obj = LoadValue(rl_obj, kRefReg);
1025  RegLocation rl_dest = InlineTarget(info);
1026  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1027  GenNullCheck(rl_obj.reg, info->opt_flags);
1028  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1029  MarkPossibleNullPointerException(info->opt_flags);
1030  if (is_empty) {
1031    // dst = (dst == 0);
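    // Branchless (length == 0): Thumb2 uses a negate/add-with-carry trick, while the other
    // ISAs compute (length - 1) and shift the sign bit down, which is 1 only when length == 0.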
1032    if (cu_->instruction_set == kThumb2) {
1033      RegStorage t_reg = AllocTemp();
1034      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1035      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1036    } else if (cu_->instruction_set == kArm64) {
1037      OpRegImm(kOpSub, rl_result.reg, 1);
1038      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1039    } else {
1040      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1041      OpRegImm(kOpSub, rl_result.reg, 1);
1042      OpRegImm(kOpLsr, rl_result.reg, 31);
1043    }
1044  }
1045  StoreValue(rl_dest, rl_result);
1046  return true;
1047}
1048
1049bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1050  if (cu_->instruction_set == kMips) {
1051    // TODO - add Mips implementation.
1052    return false;
1053  }
1054  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1055  if (rl_dest.s_reg_low == INVALID_SREG) {
1056    // Result is unused, the code is dead. Inlining successful, no code generated.
1057    return true;
1058  }
1059  RegLocation rl_src_i = info->args[0];
1060  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1061  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1062  if (IsWide(size)) {
1063    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1064      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1065      StoreValueWide(rl_dest, rl_result);
1066      return true;
1067    }
1068    RegStorage r_i_low = rl_i.reg.GetLow();
1069    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1070      // The first REV clobbers rl_result's low register, which here aliases rl_i's low half;
1070      // save that value in a temp for the second REV.
1071      r_i_low = AllocTemp();
1072      OpRegCopy(r_i_low, rl_i.reg);
1073    }
1074    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1075    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1076    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1077      FreeTemp(r_i_low);
1078    }
1079    StoreValueWide(rl_dest, rl_result);
1080  } else {
1081    DCHECK(size == k32 || size == kSignedHalf);
1082    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1083    OpRegReg(op, rl_result.reg, rl_i.reg);
1084    StoreValue(rl_dest, rl_result);
1085  }
1086  return true;
1087}
1088
1089bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1090  RegLocation rl_dest = InlineTarget(info);
1091  if (rl_dest.s_reg_low == INVALID_SREG) {
1092    // Result is unused, the code is dead. Inlining successful, no code generated.
1093    return true;
1094  }
1095  RegLocation rl_src = info->args[0];
1096  rl_src = LoadValue(rl_src, kCoreReg);
1097  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1098  RegStorage sign_reg = AllocTemp();
1099  // abs(x) = y<=x>>31, (x+y)^y.
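  // sign_reg (y) is 0 for non-negative x and -1 for negative x, so (x + y) ^ y leaves
  // non-negative values unchanged and negates negative ones without a branch.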
1100  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1101  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1102  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1103  StoreValue(rl_dest, rl_result);
1104  return true;
1105}
1106
1107bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1108  RegLocation rl_dest = InlineTargetWide(info);
1109  if (rl_dest.s_reg_low == INVALID_SREG) {
1110    // Result is unused, the code is dead. Inlining successful, no code generated.
1111    return true;
1112  }
1113  RegLocation rl_src = info->args[0];
1114  rl_src = LoadValueWide(rl_src, kCoreReg);
1115  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1116
1117  // If on x86 or if we would clobber a register needed later, just copy the source first.
1118  if (cu_->instruction_set != kX86_64 &&
1119      (cu_->instruction_set == kX86 ||
1120       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1121    OpRegCopyWide(rl_result.reg, rl_src.reg);
1122    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1123        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1124        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1125        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1126      // Reuse source registers to avoid running out of temps.
1127      FreeTemp(rl_src.reg);
1128    }
1129    rl_src = rl_result;
1130  }
1131
1132  // abs(x) = y<=x>>31, (x+y)^y.
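  // On 32-bit targets the same trick is applied across the register pair: the sign comes from
  // the high word, the add uses add-with-carry for the high half, and both halves are XORed.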
1133  RegStorage sign_reg;
1134  if (cu_->instruction_set == kX86_64) {
1135    sign_reg = AllocTempWide();
1136    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1137    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1138    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1139  } else {
1140    sign_reg = AllocTemp();
1141    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1142    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1143    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1144    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1145    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1146  }
1147  FreeTemp(sign_reg);
1148  StoreValueWide(rl_dest, rl_result);
1149  return true;
1150}
1151
1152bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1153  // Currently implemented only for ARM64.
1154  UNUSED(info, size);
1155  return false;
1156}
1157
1158bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1159  // Currently implemented only for ARM64.
1160  UNUSED(info, is_min, is_double);
1161  return false;
1162}
1163
1164bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
1165  UNUSED(info);
1166  return false;
1167}
1168
1169bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
1170  UNUSED(info);
1171  return false;
1172}
1173
1174bool Mir2Lir::GenInlinedRint(CallInfo* info) {
1175  UNUSED(info);
1176  return false;
1177}
1178
1179bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
1180  UNUSED(info, is_double);
1181  return false;
1182}
1183
1184bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1185  if (cu_->instruction_set == kMips) {
1186    // TODO - add Mips implementation
1187    return false;
1188  }
1189  RegLocation rl_dest = InlineTarget(info);
1190  if (rl_dest.s_reg_low == INVALID_SREG) {
1191    // Result is unused, the code is dead. Inlining successful, no code generated.
1192    return true;
1193  }
1194  RegLocation rl_src = info->args[0];
1195  StoreValue(rl_dest, rl_src);
1196  return true;
1197}
1198
1199bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1200  if (cu_->instruction_set == kMips) {
1201    // TODO - add Mips implementation
1202    return false;
1203  }
1204  RegLocation rl_dest = InlineTargetWide(info);
1205  if (rl_dest.s_reg_low == INVALID_SREG) {
1206    // Result is unused, the code is dead. Inlining successful, no code generated.
1207    return true;
1208  }
1209  RegLocation rl_src = info->args[0];
1210  StoreValueWide(rl_dest, rl_src);
1211  return true;
1212}
1213
1214bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1215  UNUSED(info);
1216  return false;
1217}
1218
1219
1220/*
1221 * Fast String.indexOf(I) & (II).  Tests for simple case of char <= 0xFFFF,
1222 * otherwise bails to standard library code.
1223 */
1224bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1225  RegLocation rl_obj = info->args[0];
1226  RegLocation rl_char = info->args[1];
1227  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1228    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1229    return false;
1230  }
1231
1232  ClobberCallerSave();
1233  LockCallTemps();  // Using fixed registers
1234  RegStorage reg_ptr = TargetReg(kArg0, kRef);
1235  RegStorage reg_char = TargetReg(kArg1, kNotWide);
1236  RegStorage reg_start = TargetReg(kArg2, kNotWide);
1237
1238  LoadValueDirectFixed(rl_obj, reg_ptr);
1239  LoadValueDirectFixed(rl_char, reg_char);
1240  if (zero_based) {
1241    LoadConstant(reg_start, 0);
1242  } else {
1243    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1244    LoadValueDirectFixed(rl_start, reg_start);
1245  }
1246  RegStorage r_tgt = LoadHelper(kQuickIndexOf);
1247  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1248  LIR* high_code_point_branch =
1249      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1250  // NOTE: not a safepoint
1251  OpReg(kOpBlx, r_tgt);
1252  if (!rl_char.is_const) {
1253    // Add the slow path for code points beyond 0xFFFF.
1254    DCHECK(high_code_point_branch != nullptr);
1255    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1256    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1257    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1258    ClobberCallerSave();  // We must clobber everything because slow path will return here
1259  } else {
1260    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1261    DCHECK(high_code_point_branch == nullptr);
1262  }
1263  RegLocation rl_return = GetReturn(kCoreReg);
1264  RegLocation rl_dest = InlineTarget(info);
1265  StoreValue(rl_dest, rl_return);
1266  return true;
1267}
1268
1269/* Fast String.compareTo(Ljava/lang/String;)I. */
1270bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1271  if (cu_->instruction_set == kMips) {
1272    // TODO - add Mips implementation
1273    return false;
1274  }
1275  ClobberCallerSave();
1276  LockCallTemps();  // Using fixed registers
1277  RegStorage reg_this = TargetReg(kArg0, kRef);
1278  RegStorage reg_cmp = TargetReg(kArg1, kRef);
1279
1280  RegLocation rl_this = info->args[0];
1281  RegLocation rl_cmp = info->args[1];
1282  LoadValueDirectFixed(rl_this, reg_this);
1283  LoadValueDirectFixed(rl_cmp, reg_cmp);
1284  RegStorage r_tgt;
1285  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1286    r_tgt = LoadHelper(kQuickStringCompareTo);
1287  } else {
1288    r_tgt = RegStorage::InvalidReg();
1289  }
1290  GenExplicitNullCheck(reg_this, info->opt_flags);
1291  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1292  // TUNING: check if rl_cmp.s_reg_low is already null checked
1293  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1294  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1295  // NOTE: not a safepoint
1296  CallHelper(r_tgt, kQuickStringCompareTo, false, true);
1297  RegLocation rl_return = GetReturn(kCoreReg);
1298  RegLocation rl_dest = InlineTarget(info);
1299  StoreValue(rl_dest, rl_return);
1300  return true;
1301}
1302
1303bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1304  RegLocation rl_dest = InlineTarget(info);
1305
1306  // Early exit if the result is unused.
1307  if (rl_dest.orig_sreg < 0) {
1308    return true;
1309  }
1310
1311  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1312
1313  if (Is64BitInstructionSet(cu_->instruction_set)) {
1314    LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1315                kNotVolatile);
1316  } else {
1317    Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1318  }
1319
1320  StoreValue(rl_dest, rl_result);
1321  return true;
1322}
1323
1324bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1325                                  bool is_long, bool is_volatile) {
1326  if (cu_->instruction_set == kMips) {
1327    // TODO - add Mips implementation
1328    return false;
1329  }
1330  // Unused - RegLocation rl_src_unsafe = info->args[0];
1331  RegLocation rl_src_obj = info->args[1];  // Object
1332  RegLocation rl_src_offset = info->args[2];  // long low
1333  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1334  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1335
1336  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1337  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1338  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
1339  if (is_long) {
1340    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1341        || cu_->instruction_set == kArm64) {
1342      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1343    } else {
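      // Compute the absolute address in a temp register first, then issue a 64-bit load at
      // displacement 0.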
1344      RegStorage rl_temp_offset = AllocTemp();
1345      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1346      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1347      FreeTemp(rl_temp_offset);
1348    }
1349  } else {
1350    if (rl_result.ref) {
1351      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1352    } else {
1353      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1354    }
1355  }
1356
1357  if (is_volatile) {
1358    GenMemBarrier(kLoadAny);
1359  }
1360
1361  if (is_long) {
1362    StoreValueWide(rl_dest, rl_result);
1363  } else {
1364    StoreValue(rl_dest, rl_result);
1365  }
1366  return true;
1367}
1368
1369bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1370                                  bool is_object, bool is_volatile, bool is_ordered) {
1371  if (cu_->instruction_set == kMips) {
1372    // TODO - add Mips implementation
1373    return false;
1374  }
1375  // Unused - RegLocation rl_src_unsafe = info->args[0];
1376  RegLocation rl_src_obj = info->args[1];  // Object
1377  RegLocation rl_src_offset = info->args[2];  // long low
1378  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1379  RegLocation rl_src_value = info->args[4];  // value to store
1380  if (is_volatile || is_ordered) {
1381    GenMemBarrier(kAnyStore);
1382  }
1383  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1384  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1385  RegLocation rl_value;
1386  if (is_long) {
1387    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1388    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1389        || cu_->instruction_set == kArm64) {
1390      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1391    } else {
1392      RegStorage rl_temp_offset = AllocTemp();
1393      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1394      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1395      FreeTemp(rl_temp_offset);
1396    }
1397  } else {
1398    rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
1399    if (rl_value.ref) {
1400      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1401    } else {
1402      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1403    }
1404  }
1405
1406  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1407  FreeTemp(rl_offset.reg);
1408
1409  if (is_volatile) {
1410    // Prevent reordering with a subsequent volatile load.
1411    // May also be needed to address store atomicity issues.
1412    GenMemBarrier(kAnyAny);
1413  }
1414  if (is_object) {
1415    MarkGCCard(0, rl_value.reg, rl_object.reg);
1416  }
1417  return true;
1418}
1419
1420void Mir2Lir::GenInvoke(CallInfo* info) {
1421  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1422  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1423      ->GenIntrinsic(this, info)) {
1424    return;
1425  }
1426  GenInvokeNoInline(info);
1427}
1428
1429void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1430  int call_state = 0;
1431  LIR* null_ck;
1432  LIR** p_null_ck = nullptr;
1433  NextCallInsn next_call_insn;
1434  FlushAllRegs();  /* Everything to home location */
1435  // Explicit register usage
1436  LockCallTemps();
1437
1438  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1439  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1440  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1441  info->type = method_info.GetSharpType();
1442  bool fast_path = method_info.FastPath();
1443  bool skip_this;
1444  if (info->type == kInterface) {
1445    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1446    skip_this = fast_path;
1447  } else if (info->type == kDirect) {
1448    if (fast_path) {
1449      p_null_ck = &null_ck;
1450    }
1451    next_call_insn = fast_path ? GetNextSDCallInsn() : NextDirectCallInsnSP;
1452    skip_this = false;
1453  } else if (info->type == kStatic) {
1454    next_call_insn = fast_path ? GetNextSDCallInsn() : NextStaticCallInsnSP;
1455    skip_this = false;
1456  } else if (info->type == kSuper) {
1457    DCHECK(!fast_path);  // Fast path is a direct call.
1458    next_call_insn = NextSuperCallInsnSP;
1459    skip_this = false;
1460  } else {
1461    DCHECK_EQ(info->type, kVirtual);
1462    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1463    skip_this = fast_path;
1464  }
1465  MethodReference target_method = method_info.GetTargetMethod();
1466  call_state = GenDalvikArgs(info, call_state, p_null_ck,
1467                             next_call_insn, target_method, method_info.VTableIndex(),
1468                             method_info.DirectCode(), method_info.DirectMethod(),
1469                             original_type, skip_this);
1470  // Finish up any of the call sequence not interleaved in arg loading
1471  while (call_state >= 0) {
1472    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1473                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
1474  }
1475  LIR* call_insn = GenCallInsn(method_info);
1476  MarkSafepointPC(call_insn);
1477
1478  FreeCallTemps();
1479  if (info->result.location != kLocInvalid) {
1480    // We have a following MOVE_RESULT - do it now.
1481    if (info->result.wide) {
1482      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1483      StoreValueWide(info->result, ret_loc);
1484    } else {
1485      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1486      StoreValue(info->result, ret_loc);
1487    }
1488  }
1489}
1490
1491}  // namespace art
1492