gen_invoke.cc revision 0b40ecf156e309aa17c72a28cd1b0237dbfb8746
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mir_to_lir-inl.h"
18
19#include "arm/codegen_arm.h"
20#include "dex/compiler_ir.h"
21#include "dex/dex_flags.h"
22#include "dex/mir_graph.h"
23#include "dex/quick/dex_file_method_inliner.h"
24#include "dex/quick/dex_file_to_method_inliner_map.h"
25#include "dex_file-inl.h"
26#include "driver/compiler_driver.h"
27#include "entrypoints/quick/quick_entrypoints.h"
28#include "invoke_type.h"
29#include "mirror/array.h"
30#include "mirror/class-inl.h"
31#include "mirror/dex_cache.h"
32#include "mirror/object_array-inl.h"
33#include "mirror/string.h"
34#include "scoped_thread_state_change.h"
35
36namespace art {
37
38// Shortcuts to repeatedly used long types.
39typedef mirror::ObjectArray<mirror::Object> ObjArray;
40
41/*
42 * This source file contains "gen" codegen routines that should
43 * be applicable to most targets.  Only mid-level support utilities
44 * and "op" calls may be used here.
45 */
46
47void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
48  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
49   public:
50    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
51        : LIRSlowPath(m2l, branch_in, resume_in), info_(info_in) {
52      DCHECK_EQ(info_in->offset, current_dex_pc_);
53    }
54
55    void Compile() {
56      m2l_->ResetRegPool();
57      m2l_->ResetDefTracking();
58      GenerateTargetLabel(kPseudoIntrinsicRetry);
59      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
60      m2l_->GenInvokeNoInline(info_);
61      if (cont_ != nullptr) {
62        m2l_->OpUnconditionalBranch(cont_);
63      }
64    }
65
66   private:
67    CallInfo* const info_;
68  };
69
70  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
71}
72
73/*
74 * To save scheduling time, helper calls are broken into two parts: generation of
75 * the helper target address, and the actual call to the helper.  Because x86
76 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
77 * load arguments between the two parts.
78 */
79// template <size_t pointer_size>
80RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
81  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
82    return RegStorage::InvalidReg();
83  } else {
84    return LoadHelper(trampoline);
85  }
86}
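
// A typical use of the two-part scheme (this is essentially CallRuntimeHelperImm below):
//   RegStorage r_tgt = CallHelperSetup(trampoline);   // Part 1: materialize the target (NOP on x86).
//   LoadConstant(TargetReg(kArg0, kNotWide), arg0);   // Arguments are loaded between the two parts.
//   ClobberCallerSave();
//   CallHelper(r_tgt, trampoline, safepoint_pc);      // Part 2: the actual call.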
87
88LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
89                         bool use_link) {
90  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);
91
92  if (r_tgt.Valid()) {
93    FreeTemp(r_tgt);
94  }
95
96  if (safepoint_pc) {
97    MarkSafepointPC(call_inst);
98  }
99  return call_inst;
100}
101
102void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
103  RegStorage r_tgt = CallHelperSetup(trampoline);
104  ClobberCallerSave();
105  CallHelper(r_tgt, trampoline, safepoint_pc);
106}
107
108void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
109  RegStorage r_tgt = CallHelperSetup(trampoline);
110  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
111  ClobberCallerSave();
112  CallHelper(r_tgt, trampoline, safepoint_pc);
113}
114
115void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
116                                   bool safepoint_pc) {
117  RegStorage r_tgt = CallHelperSetup(trampoline);
118  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
119  ClobberCallerSave();
120  CallHelper(r_tgt, trampoline, safepoint_pc);
121}
122
123void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
124                                           bool safepoint_pc) {
125  RegStorage r_tgt = CallHelperSetup(trampoline);
126  if (arg0.wide == 0) {
127    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
128  } else {
129    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
130  }
131  ClobberCallerSave();
132  CallHelper(r_tgt, trampoline, safepoint_pc);
133}
134
135void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
136                                      bool safepoint_pc) {
137  RegStorage r_tgt = CallHelperSetup(trampoline);
138  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
139  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
140  ClobberCallerSave();
141  CallHelper(r_tgt, trampoline, safepoint_pc);
142}
143
144void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
145                                              RegLocation arg1, bool safepoint_pc) {
146  RegStorage r_tgt = CallHelperSetup(trampoline);
147  if (arg1.wide == 0) {
148    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
149  } else {
150    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
151    LoadValueDirectWideFixed(arg1, r_tmp);
152  }
153  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
154  ClobberCallerSave();
155  CallHelper(r_tgt, trampoline, safepoint_pc);
156}
157
158void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
159                                              int arg1, bool safepoint_pc) {
160  RegStorage r_tgt = CallHelperSetup(trampoline);
161  DCHECK(!arg0.wide);
162  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
163  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
164  ClobberCallerSave();
165  CallHelper(r_tgt, trampoline, safepoint_pc);
166}
167
168void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
169                                      bool safepoint_pc) {
170  RegStorage r_tgt = CallHelperSetup(trampoline);
171  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
172  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
173  ClobberCallerSave();
174  CallHelper(r_tgt, trampoline, safepoint_pc);
175}
176
177void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
178                                      bool safepoint_pc) {
179  RegStorage r_tgt = CallHelperSetup(trampoline);
180  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
181  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
182  ClobberCallerSave();
183  CallHelper(r_tgt, trampoline, safepoint_pc);
184}
185
186void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
187                                         bool safepoint_pc) {
188  RegStorage r_tgt = CallHelperSetup(trampoline);
189  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
190  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
191  ClobberCallerSave();
192  CallHelper(r_tgt, trampoline, safepoint_pc);
193}
194
195void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
196                                         bool safepoint_pc) {
197  RegStorage r_tgt = CallHelperSetup(trampoline);
198  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
199  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
200  if (r_tmp.NotExactlyEquals(arg0)) {
201    OpRegCopy(r_tmp, arg0);
202  }
203  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
204  ClobberCallerSave();
205  CallHelper(r_tgt, trampoline, safepoint_pc);
206}
207
208void Mir2Lir::CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
209                                                    RegLocation arg1, bool safepoint_pc) {
210  RegStorage r_tgt = CallHelperSetup(trampoline);
211  DCHECK(!IsSameReg(TargetReg(kArg2, arg0.GetWideKind()), arg0));
212  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
213  if (r_tmp.NotExactlyEquals(arg0)) {
214    OpRegCopy(r_tmp, arg0);
215  }
216  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
217  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
218  ClobberCallerSave();
219  CallHelper(r_tgt, trampoline, safepoint_pc);
220}
221
222void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
223                                                      RegLocation arg0, RegLocation arg1,
224                                                      bool safepoint_pc) {
225  RegStorage r_tgt = CallHelperSetup(trampoline);
226  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
227      cu_->instruction_set == kX86_64) {
228    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
229
230    RegStorage arg1_reg;
231    if (arg1.fp == arg0.fp) {
232      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
233    } else {
234      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
235    }
236
237    if (arg0.wide == 0) {
238      LoadValueDirectFixed(arg0, arg0_reg);
239    } else {
240      LoadValueDirectWideFixed(arg0, arg0_reg);
241    }
242
243    if (arg1.wide == 0) {
244      LoadValueDirectFixed(arg1, arg1_reg);
245    } else {
246      LoadValueDirectWideFixed(arg1, arg1_reg);
247    }
248  } else {
249    DCHECK(!cu_->target64);
250    if (arg0.wide == 0) {
251      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
252      if (arg1.wide == 0) {
253        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
254        if (cu_->instruction_set == kMips) {
255          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
256        } else {
257          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
258        }
259      } else {
260        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
261        if (cu_->instruction_set == kMips) {
262          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
263        } else {
264          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
265        }
266      }
267    } else {
268      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
269      if (arg1.wide == 0) {
270        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
271        if (cu_->instruction_set == kMips) {
272          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
273        } else {
274          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
275        }
276      } else {
277        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
278        if (cu_->instruction_set == kMips) {
279          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
280        } else {
281          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
282        }
283      }
284    }
285  }
286  ClobberCallerSave();
287  CallHelper(r_tgt, trampoline, safepoint_pc);
288}
289
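// Copies arg0/arg1 into kArg0/kArg1. If the sources already sit in the opposite argument
// registers, a direct copy would clobber one of them, so kArg2 is used as a temporary to
// rotate the values (see the swap case below).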
290void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
291  WideKind arg0_kind = arg0.GetWideKind();
292  WideKind arg1_kind = arg1.GetWideKind();
293  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
294    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
295      // Swap kArg0 and kArg1 with kArg2 as temp.
296      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
297      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
298      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
299    } else {
300      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
301      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
302    }
303  } else {
304    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
305    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
306  }
307}
308
309void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
310                                      RegStorage arg1, bool safepoint_pc) {
311  RegStorage r_tgt = CallHelperSetup(trampoline);
312  CopyToArgumentRegs(arg0, arg1);
313  ClobberCallerSave();
314  CallHelper(r_tgt, trampoline, safepoint_pc);
315}
316
317void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
318                                         RegStorage arg1, int arg2, bool safepoint_pc) {
319  RegStorage r_tgt = CallHelperSetup(trampoline);
320  CopyToArgumentRegs(arg0, arg1);
321  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
322  ClobberCallerSave();
323  CallHelper(r_tgt, trampoline, safepoint_pc);
324}
325
326void Mir2Lir::CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
327                                                    RegLocation arg1, bool safepoint_pc) {
328  RegStorage r_tgt = CallHelperSetup(trampoline);
329  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
330  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
331  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
332  ClobberCallerSave();
333  CallHelper(r_tgt, trampoline, safepoint_pc);
334}
335
336void Mir2Lir::CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
337                                            bool safepoint_pc) {
338  RegStorage r_tgt = CallHelperSetup(trampoline);
339  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
340  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
341  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
342  ClobberCallerSave();
343  CallHelper(r_tgt, trampoline, safepoint_pc);
344}
345
346void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
347                                                         RegLocation arg1,
348                                                         RegLocation arg2, bool safepoint_pc) {
349  RegStorage r_tgt = CallHelperSetup(trampoline);
350  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
351                                                        // instantiation bug in GCC.
352  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
353  if (arg2.wide == 0) {
354    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
355  } else {
356    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
357  }
358  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
359  ClobberCallerSave();
360  CallHelper(r_tgt, trampoline, safepoint_pc);
361}
362
363void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
364    QuickEntrypointEnum trampoline,
365    RegLocation arg0,
366    RegLocation arg1,
367    RegLocation arg2,
368    bool safepoint_pc) {
369  RegStorage r_tgt = CallHelperSetup(trampoline);
370  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
371  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
372  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
373  ClobberCallerSave();
374  CallHelper(r_tgt, trampoline, safepoint_pc);
375}
376
377/*
378 * If there are any ins passed in registers that have not been promoted
379 * to a callee-save register, flush them to the frame.  Perform initial
380 * assignment of promoted arguments.
381 *
382 * ArgLocs is an array of location records describing the incoming arguments
383 * with one location record per word of argument.
384 */
385// TODO: Support 64-bit argument registers.
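// In effect, for each incoming argument the loop below does:
//   if (the argument arrived in a register) {
//     promoted to a physical reg ? copy reg-to-reg : flush to its frame slot;
//   } else {  // it arrived on the stack
//     promoted to a physical reg ? load it from the frame slot : nothing to do;
//   }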
386void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
387  /*
388   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>.
389   * It will attempt to keep kArg0 live (or copy it to its home location
390   * if promoted).
391   */
392  RegLocation rl_src = rl_method;
393  rl_src.location = kLocPhysReg;
394  rl_src.reg = TargetReg(kArg0, kRef);
395  rl_src.home = false;
396  MarkLive(rl_src);
397  StoreValue(rl_method, rl_src);
398  // If Method* has been promoted, explicitly flush
399  if (rl_method.location == kLocPhysReg) {
400    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
401  }
402
403  if (mir_graph_->GetNumOfInVRs() == 0) {
404    return;
405  }
406
407  int start_vreg = mir_graph_->GetFirstInVR();
408  /*
409   * Copy incoming arguments to their proper home locations.
410   * NOTE: an older version of dx had an issue in which
411   * it would reuse static method argument registers.
412   * This could result in the same Dalvik virtual register
413   * being promoted to both core and fp regs. To account for this,
414   * we only copy to the corresponding promoted physical register
415   * if it matches the type of the SSA name for the incoming
416   * argument.  It is also possible that long and double arguments
417   * end up half-promoted.  In those cases, we must flush the promoted
418   * half to memory as well.
419   */
420  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
421  RegLocation* t_loc = nullptr;
422  EnsureInitializedArgMappingToPhysicalReg();
423  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
424    // get reg corresponding to input
425    RegStorage reg = in_to_reg_storage_mapping_.GetReg(i);
426    t_loc = &ArgLocs[i];
427
428    // If a wide input was mapped to a single 32-bit register, ignore the register
429    // and handle the value as if it arrived in memory.
430    if (t_loc->wide && reg.Valid() && !reg.Is64Bit()) {
431      // The memory already holds the half. Don't do anything.
432      reg = RegStorage::InvalidReg();
433    }
434
435    if (reg.Valid()) {
436      // If arriving in register.
437
438      // We have already updated the arg location with promoted info
439      // so we can rely on it here.
440      if (t_loc->location == kLocPhysReg) {
441        // Just copy it.
442        if (t_loc->wide) {
443          OpRegCopyWide(t_loc->reg, reg);
444        } else {
445          OpRegCopy(t_loc->reg, reg);
446        }
447      } else {
448        // Needs flush.
449        int offset = SRegOffset(start_vreg + i);
450        if (t_loc->ref) {
451          StoreRefDisp(TargetPtrReg(kSp), offset, reg, kNotVolatile);
452        } else {
453          StoreBaseDisp(TargetPtrReg(kSp), offset, reg, t_loc->wide ? k64 : k32, kNotVolatile);
454        }
455      }
456    } else {
457      // If arriving in frame & promoted.
458      if (t_loc->location == kLocPhysReg) {
459        int offset = SRegOffset(start_vreg + i);
460        if (t_loc->ref) {
461          LoadRefDisp(TargetPtrReg(kSp), offset, t_loc->reg, kNotVolatile);
462        } else {
463          LoadBaseDisp(TargetPtrReg(kSp), offset, t_loc->reg, t_loc->wide ? k64 : k32,
464                       kNotVolatile);
465        }
466      }
467    }
468  }
469}
470
471static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
472  RegLocation rl_arg = info->args[0];
473  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
474}
475
476static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
477  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
478  // get this->klass_ [use kArg1, set kArg0]
479  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
480                  cg->TargetReg(kArg0, kRef),
481                  kNotVolatile);
482  cg->MarkPossibleNullPointerException(info->opt_flags);
483}
484
485static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
486                                                       const CompilationUnit* cu, Mir2Lir* cg) {
487  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
488    int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
489        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
490    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
491    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
492                     cg->TargetPtrReg(kInvokeTgt));
493    return true;
494  }
495  return false;
496}
497
498/*
499 * Bit of a hack here - in the absence of a real scheduling pass,
500 * emit the next instruction in a virtual invoke sequence.
501 * We can use kLr as a temp prior to target address loading.
502 * Note also that we'll load the first argument ("this") into
503 * kArg1 here rather than in the standard GenDalvikArgs.
504 */
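// State sequence for the fast virtual-call path (one state is emitted per call):
//   0: kArg1 := this
//   1: kArg0 := kArg1->klass_                (includes the null check)
//   2: kArg0 := kArg0->embedded_vtable[method_idx]
//   3: kInvokeTgt := kArg0->entrypoint       (skipped on x86/x86-64, which call through memory)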
505static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
506                         int state, const MethodReference& target_method,
507                         uint32_t method_idx, uintptr_t, uintptr_t,
508                         InvokeType) {
509  UNUSED(target_method);
510  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
511  /*
512   * This is the fast path in which the target virtual method is
513   * fully resolved at compile time.
514   */
515  switch (state) {
516    case 0:
517      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
518      break;
519    case 1:
520      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
521                                                  // Includes a null-check.
522      break;
523    case 2: {
524      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
525      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
526          method_idx * sizeof(mirror::Class::VTableEntry);
527      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
528      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
529      break;
530    }
531    case 3:
532      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
533        break;                                    // kInvokeTgt := kArg0->entrypoint
534      }
535      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
536      FALLTHROUGH_INTENDED;
537    default:
538      return -1;
539  }
540  return state + 1;
541}
542
543/*
544 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
545 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
546 * more than one interface method map to the same index. Note also that we'll load the first
547 * argument ("this") into kArg1 here rather than in the standard GenDalvikArgs.
548 */
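// State sequence:
//   0: kHiddenArg := target method index     (also copied to kHiddenFpArg on x86)
//   1: kArg1 := this
//   2: kArg0 := kArg1->klass_                (includes the null check)
//   3: kArg0 := kArg0->embedded_imtable[method_idx % kImtSize]
//   4: kInvokeTgt := kArg0->entrypoint       (skipped on x86/x86-64)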
549static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
550                                 const MethodReference& target_method,
551                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
552  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
553
554  switch (state) {
555    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
556      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
557      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
558      if (cu->instruction_set == kX86) {
559        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
560      }
561      break;
562    case 1:
563      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
564      break;
565    case 2:
566      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
567                                                  // Includes a null-check.
568      break;
569    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
570      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
571          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
572      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
573      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
574      break;
575    }
576    case 4:
577      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
578        break;                                    // kInvokeTgt := kArg0->entrypoint
579      }
580      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
581      FALLTHROUGH_INTENDED;
582    default:
583      return -1;
584  }
585  return state + 1;
586}
587
588static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
589                            QuickEntrypointEnum trampoline, int state,
590                            const MethodReference& target_method, uint32_t method_idx) {
591  UNUSED(info, method_idx);
592  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
593
594  /*
595   * This handles the case in which the base method is not fully
596   * resolved at compile time; we bail to a runtime helper.
597   */
598  if (state == 0) {
599    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
600      // Load trampoline target
601      int32_t disp;
602      if (cu->target64) {
603        disp = GetThreadOffset<8>(trampoline).Int32Value();
604      } else {
605        disp = GetThreadOffset<4>(trampoline).Int32Value();
606      }
607      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
608    }
609    // Load kArg0 with method index
610    CHECK_EQ(cu->dex_file, target_method.dex_file);
611    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
612    return 1;
613  }
614  return -1;
615}
616
617static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
618                                int state,
619                                const MethodReference& target_method,
620                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
621  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
622                          target_method, 0);
623}
624
625static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
626                                const MethodReference& target_method,
627                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
628  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
629                          target_method, 0);
630}
631
632static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
633                               const MethodReference& target_method,
634                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
635  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
636                          target_method, 0);
637}
638
639static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
640                           const MethodReference& target_method,
641                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
642  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
643                          target_method, 0);
644}
645
646static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
647                                                CallInfo* info, int state,
648                                                const MethodReference& target_method,
649                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
650  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
651                          target_method, 0);
652}
653
654// Default implementation of implicit null pointer check.
655// Overridden by arch-specific code as necessary.
656void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
657  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
658    return;
659  }
660  RegStorage tmp = AllocTemp();
661  Load32Disp(reg, 0, tmp);
662  MarkPossibleNullPointerException(opt_flags);
663  FreeTemp(tmp);
664}
665
666/**
667 * @brief Used to flush promoted registers if they are used as arguments
668 * in an invocation.
669 * @param info the information about the arguments of the invocation.
670 * @param start the first argument we should start to look from.
671 */
672void Mir2Lir::GenDalvikArgsFlushPromoted(CallInfo* info, int start) {
673  if (cu_->disable_opt & (1 << kPromoteRegs)) {
674    // This makes sense only if promotion is enabled.
675    return;
676  }
677  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
678  // Scan the rest of the args; if one is in a physical register, flush it to memory.
679  for (size_t next_arg = start; next_arg < info->num_arg_words;) {
680    RegLocation loc = info->args[next_arg];
681    if (loc.wide) {
682      loc = UpdateLocWide(loc);
683      if (loc.location == kLocPhysReg) {
684        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
685      }
686      next_arg += 2;
687    } else {
688      loc = UpdateLoc(loc);
689      if (loc.location == kLocPhysReg) {
690        if (loc.ref) {
691          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
692        } else {
693          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
694                        kNotVolatile);
695        }
696      }
697      next_arg++;
698    }
699  }
700}
701
702/**
703 * @brief Used to optimize the copying of VRs that are arguments of an invocation.
704 * Note that promoted registers must be flushed before copying.
705 * An implementation that does the copying may skip several of the leading VRs but
706 * must copy through to the end, and it must return the number of skipped VRs
707 * (which may be all of them).
708 * @see GenDalvikArgsFlushPromoted
709 * @param info the information about the arguments of the invocation.
710 * @param first the first argument to start looking from.
711 * @param count the number of remaining arguments we can handle.
712 * @return the number of arguments we did not handle. Unhandled arguments
713 * are contiguous starting at 'first' and must be handled by the caller.
714 */
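// Note: for large counts the implementation below reduces to a single runtime memcpy of
// the already-flushed Dalvik VRs into the out area:
//   dst  = SP + out-VR offset of 'first'
//   src  = SP + Dalvik-VR offset of 'first'
//   size = count * 4 bytes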
715int Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
716  // The runtime memcpy call is pretty expensive, so only use it if the count is big.
717  if (count > 16) {
718    GenDalvikArgsFlushPromoted(info, first);
719    int start_offset = SRegOffset(info->args[first].s_reg_low);
720    int outs_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);
721
722    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
723    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
724    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
725                               count * 4, false);
726    count = 0;
727  }
728  return count;
729}
730
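// Marshals the arguments of an invoke. In effect, arguments not mapped to physical
// registers by the target calling convention are stored to the out area on the stack
// first (using kArg2/kArg3 as temporaries), and register-mapped arguments are loaded
// afterwards so those temporaries do not clobber them. next_call_insn() is invoked
// between the moves so the invoke sequence itself can be interleaved with them.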
731int Mir2Lir::GenDalvikArgs(CallInfo* info, int call_state,
732                           LIR** pcrLabel, NextCallInsn next_call_insn,
733                           const MethodReference& target_method,
734                           uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
735                           InvokeType type, bool skip_this) {
736  // If no arguments, just return.
737  if (info->num_arg_words == 0u)
738    return call_state;
739
740  const size_t start_index = skip_this ? 1 : 0;
741
742  // Get the architecture-dependent mapping between output VRs and physical registers
743  // based on the shorty of the method to call.
744  InToRegStorageMapping in_to_reg_storage_mapping(arena_);
745  {
746    const char* target_shorty = mir_graph_->GetShortyFromMethodReference(target_method);
747    ShortyIterator shorty_iterator(target_shorty, type == kStatic);
748    in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
749  }
750
751  size_t stack_map_start = std::max(in_to_reg_storage_mapping.GetEndMappedIn(), start_index);
752  if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
753    // It is possible that the last mapped reg is 32-bit while the arg is 64-bit.
754    // It will be handled together with the low part mapped to a register.
755    stack_map_start++;
756  }
757  size_t regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;
758
759  // If it is a range case, we can try to copy the remaining VRs (those not mapped to
760  // physical registers) using a more optimal algorithm.
761  if (info->is_range && regs_left_to_pass_via_stack > 1) {
762    regs_left_to_pass_via_stack = GenDalvikArgsBulkCopy(info, stack_map_start,
763                                                        regs_left_to_pass_via_stack);
764  }
765
766  // Now handle any remaining VRs mapped to stack.
767  if (in_to_reg_storage_mapping.HasArgumentsOnStack()) {
768    // Use two temps, but not kArg1: it may hold "this", which we might be able to skip.
769    // Keep separate single and wide temps - it can give some advantage.
770    RegStorage regRef = TargetReg(kArg3, kRef);
771    RegStorage regSingle = TargetReg(kArg3, kNotWide);
772    RegStorage regWide = TargetReg(kArg2, kWide);
773    for (size_t i = start_index; i < stack_map_start + regs_left_to_pass_via_stack; i++) {
774      RegLocation rl_arg = info->args[i];
775      rl_arg = UpdateRawLoc(rl_arg);
776      RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
777      if (!reg.Valid()) {
778        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
779        {
780          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
781          if (rl_arg.wide) {
782            if (rl_arg.location == kLocPhysReg) {
783              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
784            } else {
785              LoadValueDirectWideFixed(rl_arg, regWide);
786              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
787            }
788          } else {
789            if (rl_arg.location == kLocPhysReg) {
790              if (rl_arg.ref) {
791                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
792              } else {
793                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
794              }
795            } else {
796              if (rl_arg.ref) {
797                LoadValueDirectFixed(rl_arg, regRef);
798                StoreRefDisp(TargetPtrReg(kSp), out_offset, regRef, kNotVolatile);
799              } else {
800                LoadValueDirectFixed(rl_arg, regSingle);
801                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
802              }
803            }
804          }
805        }
806        call_state = next_call_insn(cu_, info, call_state, target_method,
807                                    vtable_idx, direct_code, direct_method, type);
808      }
809      if (rl_arg.wide) {
810        i++;
811      }
812    }
813  }
814
815  // Finish with VRs mapped to physical registers.
816  for (size_t i = start_index; i < stack_map_start; i++) {
817    RegLocation rl_arg = info->args[i];
818    rl_arg = UpdateRawLoc(rl_arg);
819    RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
820    if (reg.Valid()) {
821      if (rl_arg.wide) {
822        // If reg is not 64-bit (it holds only half of a 64-bit value), handle it separately.
823        if (!reg.Is64Bit()) {
824          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
825          if (rl_arg.location == kLocPhysReg) {
826            int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
827            // Dump it to memory.
828            StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
829            LoadBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
830          } else {
831            int high_offset = StackVisitor::GetOutVROffset(i + 1, cu_->instruction_set);
832            // First, use target reg for high part.
833            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + 1), reg, k32,
834                         kNotVolatile);
835            StoreBaseDisp(TargetPtrReg(kSp), high_offset, reg, k32, kNotVolatile);
836            // Now, use target reg for low part.
837            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low), reg, k32, kNotVolatile);
838            int low_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
839            // And store it to the expected memory location.
840            StoreBaseDisp(TargetPtrReg(kSp), low_offset, reg, k32, kNotVolatile);
841          }
842        } else {
843          LoadValueDirectWideFixed(rl_arg, reg);
844        }
845      } else {
846        LoadValueDirectFixed(rl_arg, reg);
847      }
848      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
849                               direct_code, direct_method, type);
850    }
851    if (rl_arg.wide) {
852      i++;
853    }
854  }
855
856  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
857                           direct_code, direct_method, type);
858  if (pcrLabel) {
859    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
860      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
861    } else {
862      *pcrLabel = nullptr;
863      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
864    }
865  }
866  return call_state;
867}
868
869void Mir2Lir::EnsureInitializedArgMappingToPhysicalReg() {
870  if (!in_to_reg_storage_mapping_.IsInitialized()) {
871    ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
872    in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
873  }
874}
875
876RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
877  RegLocation res;
878  if (info->result.location == kLocInvalid) {
879    // If result is unused, return a sink target based on type of invoke target.
880    res = GetReturn(
881        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
882  } else {
883    res = info->result;
884    DCHECK_EQ(LocToRegClass(res),
885              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
886  }
887  return res;
888}
889
890RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
891  RegLocation res;
892  if (info->result.location == kLocInvalid) {
893    // If result is unused, return a sink target based on type of invoke target.
894    res = GetReturnWide(ShortyToRegClass(
895        mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
896  } else {
897    res = info->result;
898    DCHECK_EQ(LocToRegClass(res),
899              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
900  }
901  return res;
902}
903
904bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
905  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
906    // TODO: add Mips and Mips64 implementations.
907    return false;
908  }
909
910  bool use_direct_type_ptr;
911  uintptr_t direct_type_ptr;
912  ClassReference ref;
913  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
914        &use_direct_type_ptr, &direct_type_ptr)) {
915    return false;
916  }
917
918  RegStorage reg_class = TargetReg(kArg1, kRef);
919  Clobber(reg_class);
920  LockTemp(reg_class);
921  if (use_direct_type_ptr) {
922    LoadConstant(reg_class, direct_type_ptr);
923  } else {
924    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
925    LoadClassType(*ref.first, type_idx, kArg1);
926  }
927
928  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
929  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
930  CHECK(slow_path_flag_offset && disable_flag_offset &&
931        (slow_path_flag_offset != disable_flag_offset));
932
933  // intrinsic logic start.
934  RegLocation rl_obj = info->args[0];
935  rl_obj = LoadValue(rl_obj, kRefReg);
936
937  RegStorage reg_slow_path = AllocTemp();
938  RegStorage reg_disabled = AllocTemp();
939  LoadBaseDisp(reg_class, slow_path_flag_offset, reg_slow_path, kSignedByte, kNotVolatile);
940  LoadBaseDisp(reg_class, disable_flag_offset, reg_disabled, kSignedByte, kNotVolatile);
941  FreeTemp(reg_class);
942  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
943  FreeTemp(reg_disabled);
944
945  // if slow path, jump to JNI path target
946  LIR* slow_path_branch;
947  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
948    // Generate only the conditional branch, as the OR already set the condition flags (we are interested in the 'Z' flag).
949    slow_path_branch = OpCondBranch(kCondNe, nullptr);
950  } else {
951    // Generate compare and branch.
952    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
953  }
954  FreeTemp(reg_slow_path);
955
956  // Slow path not taken; simply load the referent of the reference object.
957  RegLocation rl_dest = InlineTarget(info);
958  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
959  GenNullCheck(rl_obj.reg, info->opt_flags);
960  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
961      kNotVolatile);
962  MarkPossibleNullPointerException(info->opt_flags);
963  StoreValue(rl_dest, rl_result);
964
965  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
966  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
967  ClobberCallerSave();  // We must clobber everything because slow path will return here
968  return true;
969}
970
971bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
972  // Location of reference to data array
973  int value_offset = mirror::String::ValueOffset().Int32Value();
974  // Location of count
975  int count_offset = mirror::String::CountOffset().Int32Value();
976  // Starting offset within data array
977  int offset_offset = mirror::String::OffsetOffset().Int32Value();
978  // Start of char data within array_
979  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
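
  // The character is read from: value_ + data_offset + 2 * (offset_ + index). Below,
  // data_offset is folded into the array pointer, (offset_ + index) goes into reg_off,
  // and the final halfword load scales the index by the 2-byte char size.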
980
981  RegLocation rl_obj = info->args[0];
982  RegLocation rl_idx = info->args[1];
983  rl_obj = LoadValue(rl_obj, kRefReg);
984  rl_idx = LoadValue(rl_idx, kCoreReg);
985  RegStorage reg_max;
986  GenNullCheck(rl_obj.reg, info->opt_flags);
987  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
988  LIR* range_check_branch = nullptr;
989  RegStorage reg_off;
990  RegStorage reg_ptr;
991  reg_off = AllocTemp();
992  reg_ptr = AllocTempRef();
993  if (range_check) {
994    reg_max = AllocTemp();
995    Load32Disp(rl_obj.reg, count_offset, reg_max);
996    MarkPossibleNullPointerException(info->opt_flags);
997  }
998  Load32Disp(rl_obj.reg, offset_offset, reg_off);
999  MarkPossibleNullPointerException(info->opt_flags);
1000  LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1001  if (range_check) {
1002    // Set up a slow path to allow retry in case of bounds violation.
1003    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1004    FreeTemp(reg_max);
1005    range_check_branch = OpCondBranch(kCondUge, nullptr);
1006  }
1007  OpRegImm(kOpAdd, reg_ptr, data_offset);
1008  if (rl_idx.is_const) {
1009    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1010  } else {
1011    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1012  }
1013  FreeTemp(rl_obj.reg);
1014  if (rl_idx.location == kLocPhysReg) {
1015    FreeTemp(rl_idx.reg);
1016  }
1017  RegLocation rl_dest = InlineTarget(info);
1018  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1019  LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1020  FreeTemp(reg_off);
1021  FreeTemp(reg_ptr);
1022  StoreValue(rl_dest, rl_result);
1023  if (range_check) {
1024    DCHECK(range_check_branch != nullptr);
1025    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1026    AddIntrinsicSlowPath(info, range_check_branch);
1027  }
1028  return true;
1029}
1030
1031// Generates an inlined String.isEmpty() or String.length().
1032bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1033  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1034    // TODO: add Mips and Mips64 implementations.
1035    return false;
1036  }
1037  // dst = src.length();
1038  RegLocation rl_obj = info->args[0];
1039  rl_obj = LoadValue(rl_obj, kRefReg);
1040  RegLocation rl_dest = InlineTarget(info);
1041  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1042  GenNullCheck(rl_obj.reg, info->opt_flags);
1043  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1044  MarkPossibleNullPointerException(info->opt_flags);
1045  if (is_empty) {
1046    // dst = (dst == 0);
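    // Branchless test: the x86/arm64 path computes (dst - 1) logically shifted right by 31,
    // which is 1 only when dst was 0 (string lengths are non-negative and fit in 31 bits);
    // the Thumb2 path reaches the same result with a negate-and-add-with-carry idiom.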
1047    if (cu_->instruction_set == kThumb2) {
1048      RegStorage t_reg = AllocTemp();
1049      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1050      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1051    } else if (cu_->instruction_set == kArm64) {
1052      OpRegImm(kOpSub, rl_result.reg, 1);
1053      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1054    } else {
1055      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1056      OpRegImm(kOpSub, rl_result.reg, 1);
1057      OpRegImm(kOpLsr, rl_result.reg, 31);
1058    }
1059  }
1060  StoreValue(rl_dest, rl_result);
1061  return true;
1062}
1063
1064bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1065  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1066    // TODO: add Mips and Mips64 implementations.
1067    return false;
1068  }
1069  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1070  if (rl_dest.s_reg_low == INVALID_SREG) {
1071    // Result is unused, the code is dead. Inlining successful, no code generated.
1072    return true;
1073  }
1074  RegLocation rl_src_i = info->args[0];
1075  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1076  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1077  if (IsWide(size)) {
1078    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1079      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1080      StoreValueWide(rl_dest, rl_result);
1081      return true;
1082    }
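    // For 32-bit register pairs, the 64-bit byte swap below is composed from two 32-bit
    // reversals with the halves exchanged: result.lo = rev(src.hi), result.hi = rev(src.lo).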
1083    RegStorage r_i_low = rl_i.reg.GetLow();
1084    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1085      // The first REV will clobber rl_result.reg.GetReg(); save the value in a temp for the second REV.
1086      r_i_low = AllocTemp();
1087      OpRegCopy(r_i_low, rl_i.reg);
1088    }
1089    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1090    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1091    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1092      FreeTemp(r_i_low);
1093    }
1094    StoreValueWide(rl_dest, rl_result);
1095  } else {
1096    DCHECK(size == k32 || size == kSignedHalf);
1097    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1098    OpRegReg(op, rl_result.reg, rl_i.reg);
1099    StoreValue(rl_dest, rl_result);
1100  }
1101  return true;
1102}
1103
1104bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1105  RegLocation rl_dest = InlineTarget(info);
1106  if (rl_dest.s_reg_low == INVALID_SREG) {
1107    // Result is unused, the code is dead. Inlining successful, no code generated.
1108    return true;
1109  }
1110  RegLocation rl_src = info->args[0];
1111  rl_src = LoadValue(rl_src, kCoreReg);
1112  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1113  RegStorage sign_reg = AllocTemp();
1114  // abs(x): y = x >> 31 (arithmetic shift); abs = (x + y) ^ y.
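  // E.g. x = -5: y = x >> 31 = -1 (all ones), x + y = -6, (-6) ^ (-1) = 5;
  //      x =  5: y = 0, (5 + 0) ^ 0 = 5.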
1115  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1116  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1117  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1118  StoreValue(rl_dest, rl_result);
1119  return true;
1120}
1121
1122bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1123  RegLocation rl_dest = InlineTargetWide(info);
1124  if (rl_dest.s_reg_low == INVALID_SREG) {
1125    // Result is unused, the code is dead. Inlining successful, no code generated.
1126    return true;
1127  }
1128  RegLocation rl_src = info->args[0];
1129  rl_src = LoadValueWide(rl_src, kCoreReg);
1130  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1131
1132  // If on x86 or if we would clobber a register needed later, just copy the source first.
1133  if (cu_->instruction_set != kX86_64 &&
1134      (cu_->instruction_set == kX86 ||
1135       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1136    OpRegCopyWide(rl_result.reg, rl_src.reg);
1137    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1138        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1139        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1140        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1141      // Reuse source registers to avoid running out of temps.
1142      FreeTemp(rl_src.reg);
1143    }
1144    rl_src = rl_result;
1145  }
1146
1147  // abs(x): y = x >> 63 (sign; taken from the high word on 32-bit targets); abs = (x + y) ^ y.
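  // On 32-bit targets the sign word y is derived from the high half, the add is carried
  // across the pair with add/adc, and both halves are then XORed with y.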
1148  RegStorage sign_reg;
1149  if (cu_->instruction_set == kX86_64) {
1150    sign_reg = AllocTempWide();
1151    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1152    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1153    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1154  } else {
1155    sign_reg = AllocTemp();
1156    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1157    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1158    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1159    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1160    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1161  }
1162  FreeTemp(sign_reg);
1163  StoreValueWide(rl_dest, rl_result);
1164  return true;
1165}
1166
1167bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1168  // Currently implemented only for ARM64.
1169  UNUSED(info, size);
1170  return false;
1171}
1172
1173bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1174  // Currently implemented only for ARM64.
1175  UNUSED(info, is_min, is_double);
1176  return false;
1177}
1178
1179bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
1180  UNUSED(info);
1181  return false;
1182}
1183
1184bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
1185  UNUSED(info);
1186  return false;
1187}
1188
1189bool Mir2Lir::GenInlinedRint(CallInfo* info) {
1190  UNUSED(info);
1191  return false;
1192}
1193
1194bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
1195  UNUSED(info, is_double);
1196  return false;
1197}
1198
1199bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1200  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1201    // TODO: add Mips and Mips64 implementations.
1202    return false;
1203  }
1204  RegLocation rl_dest = InlineTarget(info);
1205  if (rl_dest.s_reg_low == INVALID_SREG) {
1206    // Result is unused, the code is dead. Inlining successful, no code generated.
1207    return true;
1208  }
1209  RegLocation rl_src = info->args[0];
1210  StoreValue(rl_dest, rl_src);
1211  return true;
1212}
1213
1214bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1215  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1216    // TODO: add Mips and Mips64 implementations.
1217    return false;
1218  }
1219  RegLocation rl_dest = InlineTargetWide(info);
1220  if (rl_dest.s_reg_low == INVALID_SREG) {
1221    // Result is unused, the code is dead. Inlining successful, no code generated.
1222    return true;
1223  }
1224  RegLocation rl_src = info->args[0];
1225  StoreValueWide(rl_dest, rl_src);
1226  return true;
1227}
1228
1229bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1230  UNUSED(info);
1231  return false;
1232}
1233
1234
1235/*
1236 * Fast String.indexOf(I) & (II).  Tests for the simple case of char <= 0xFFFF;
1237 * otherwise bails to the standard library code.
1238 */
1239bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1240  RegLocation rl_obj = info->args[0];
1241  RegLocation rl_char = info->args[1];
1242  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1243    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1244    return false;
1245  }
1246
1247  ClobberCallerSave();
1248  LockCallTemps();  // Using fixed registers
1249  RegStorage reg_ptr = TargetReg(kArg0, kRef);
1250  RegStorage reg_char = TargetReg(kArg1, kNotWide);
1251  RegStorage reg_start = TargetReg(kArg2, kNotWide);
1252
1253  LoadValueDirectFixed(rl_obj, reg_ptr);
1254  LoadValueDirectFixed(rl_char, reg_char);
1255  if (zero_based) {
1256    LoadConstant(reg_start, 0);
1257  } else {
1258    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1259    LoadValueDirectFixed(rl_start, reg_start);
1260  }
1261  RegStorage r_tgt = LoadHelper(kQuickIndexOf);
1262  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1263  LIR* high_code_point_branch =
1264      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1265  // NOTE: not a safepoint
1266  OpReg(kOpBlx, r_tgt);
1267  if (!rl_char.is_const) {
1268    // Add the slow path for code points beyond 0xFFFF.
1269    DCHECK(high_code_point_branch != nullptr);
1270    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1271    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1272    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1273    ClobberCallerSave();  // We must clobber everything because slow path will return here
1274  } else {
1275    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1276    DCHECK(high_code_point_branch == nullptr);
1277  }
1278  RegLocation rl_return = GetReturn(kCoreReg);
1279  RegLocation rl_dest = InlineTarget(info);
1280  StoreValue(rl_dest, rl_return);
1281  return true;
1282}
1283
1284/* Fast String.compareTo(Ljava/lang/String;)I. */
1285bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1286  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1287    // TODO: add Mips and Mips64 implementations.
1288    return false;
1289  }
1290  ClobberCallerSave();
1291  LockCallTemps();  // Using fixed registers
1292  RegStorage reg_this = TargetReg(kArg0, kRef);
1293  RegStorage reg_cmp = TargetReg(kArg1, kRef);
1294
1295  RegLocation rl_this = info->args[0];
1296  RegLocation rl_cmp = info->args[1];
1297  LoadValueDirectFixed(rl_this, reg_this);
1298  LoadValueDirectFixed(rl_cmp, reg_cmp);
1299  RegStorage r_tgt;
1300  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1301    r_tgt = LoadHelper(kQuickStringCompareTo);
1302  } else {
1303    r_tgt = RegStorage::InvalidReg();
1304  }
1305  GenExplicitNullCheck(reg_this, info->opt_flags);
1306  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1307  // TUNING: check if rl_cmp.s_reg_low is already null checked
1308  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1309  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1310  // NOTE: not a safepoint
1311  CallHelper(r_tgt, kQuickStringCompareTo, false, true);
1312  RegLocation rl_return = GetReturn(kCoreReg);
1313  RegLocation rl_dest = InlineTarget(info);
1314  StoreValue(rl_dest, rl_return);
1315  return true;
1316}
1317
1318bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1319  RegLocation rl_dest = InlineTarget(info);
1320
1321  // Early exit if the result is unused.
1322  if (rl_dest.orig_sreg < 0) {
1323    return true;
1324  }
1325
1326  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1327
1328  if (Is64BitInstructionSet(cu_->instruction_set)) {
1329    LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1330                kNotVolatile);
1331  } else {
1332    Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1333  }
1334
1335  StoreValue(rl_dest, rl_result);
1336  return true;
1337}
1338
1339bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1340                                  bool is_long, bool is_volatile) {
1341  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1342    // TODO: add Mips and Mips64 implementations.
1343    return false;
1344  }
1345  // Unused - RegLocation rl_src_unsafe = info->args[0];
1346  RegLocation rl_src_obj = info->args[1];  // Object
1347  RegLocation rl_src_offset = info->args[2];  // Low word of the long offset.
1348  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1349  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1350
1351  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1352  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1353  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
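      // 64-bit reads: x86, x86-64 and arm64 use an indexed 64-bit load directly; other targets
      // form object + offset in a temp and load through a displacement.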
1354  if (is_long) {
1355    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1356        || cu_->instruction_set == kArm64) {
1357      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1358    } else {
1359      RegStorage rl_temp_offset = AllocTemp();
1360      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1361      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1362      FreeTemp(rl_temp_offset);
1363    }
1364  } else {
1365    if (rl_result.ref) {
1366      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1367    } else {
1368      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1369    }
1370  }
1371
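      // A volatile get needs acquire ordering: the load/any barrier keeps the load from being
      // reordered with later accesses.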
1372  if (is_volatile) {
1373    GenMemBarrier(kLoadAny);
1374  }
1375
1376  if (is_long) {
1377    StoreValueWide(rl_dest, rl_result);
1378  } else {
1379    StoreValue(rl_dest, rl_result);
1380  }
1381  return true;
1382}
1383
1384bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1385                                  bool is_object, bool is_volatile, bool is_ordered) {
1386  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1387    // TODO: add Mips and Mips64 implementations.
1388    return false;
1389  }
1390  // Unused - RegLocation rl_src_unsafe = info->args[0];
1391  RegLocation rl_src_obj = info->args[1];  // Object
1392  RegLocation rl_src_offset = info->args[2];  // Low word of the long offset.
1393  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1394  RegLocation rl_src_value = info->args[4];  // value to store
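      // A volatile or ordered put needs release ordering: the any/store barrier keeps earlier
      // accesses from being reordered past the store.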
1395  if (is_volatile || is_ordered) {
1396    GenMemBarrier(kAnyStore);
1397  }
1398  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1399  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1400  RegLocation rl_value;
1401  if (is_long) {
1402    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1403    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1404        || cu_->instruction_set == kArm64) {
1405      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1406    } else {
1407      RegStorage rl_temp_offset = AllocTemp();
1408      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1409      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1410      FreeTemp(rl_temp_offset);
1411    }
1412  } else {
1413    rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
1414    if (rl_value.ref) {
1415      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1416    } else {
1417      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1418    }
1419  }
1420
1421  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1422  FreeTemp(rl_offset.reg);
1423
1424  if (is_volatile) {
1425    // Prevent reordering with a subsequent volatile load.
1426    // May also be needed to address store atomicity issues.
1427    GenMemBarrier(kAnyAny);
1428  }
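      // Reference stores must dirty the card for the holding object so the GC rescans it.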
1429  if (is_object) {
1430    MarkGCCard(0, rl_value.reg, rl_object.reg);
1431  }
1432  return true;
1433}
1434
1435void Mir2Lir::GenInvoke(CallInfo* info) {
1436  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1437  const DexFile* dex_file = info->method_ref.dex_file;
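      // Give the method inliner a chance to expand this call as an intrinsic; fall back to a
      // regular invoke if it declines.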
1438  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file)
1439      ->GenIntrinsic(this, info)) {
1440    return;
1441  }
1442  GenInvokeNoInline(info);
1443}
1444
1445void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1446  int call_state = 0;
1447  LIR* null_ck;
1448  LIR** p_null_ck = nullptr;
1449  NextCallInsn next_call_insn;
1450  FlushAllRegs();  /* Everything to home location */
1451  // Explicit register usage
1452  LockCallTemps();
1453
1454  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1455  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1456  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1457  info->type = method_info.GetSharpType();
1458  bool fast_path = method_info.FastPath();
1459  bool skip_this;
1460
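      // Pick the call sequence for this invoke type. skip_this marks sequences that load the
      // receiver themselves (virtual/interface fast paths), so argument setup does not reload it.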
1461  if (info->type == kInterface) {
1462    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1463    skip_this = fast_path;
1464  } else if (info->type == kDirect) {
1465    if (fast_path) {
1466      p_null_ck = &null_ck;
1467    }
1468    next_call_insn = fast_path ? GetNextSDCallInsn() : NextDirectCallInsnSP;
1469    skip_this = false;
1470  } else if (info->type == kStatic) {
1471    next_call_insn = fast_path ? GetNextSDCallInsn() : NextStaticCallInsnSP;
1472    skip_this = false;
1473  } else if (info->type == kSuper) {
1474    DCHECK(!fast_path);  // Fast path is a direct call.
1475    next_call_insn = NextSuperCallInsnSP;
1476    skip_this = false;
1477  } else {
1478    DCHECK_EQ(info->type, kVirtual);
1479    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1480    skip_this = fast_path;
1481  }
1482  MethodReference target_method = method_info.GetTargetMethod();
1483  call_state = GenDalvikArgs(info, call_state, p_null_ck,
1484                             next_call_insn, target_method, method_info.VTableIndex(),
1485                             method_info.DirectCode(), method_info.DirectMethod(),
1486                             original_type, skip_this);
1487  // Finish any part of the call sequence not interleaved with argument loading.
1488  while (call_state >= 0) {
1489    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1490                                method_info.DirectCode(), method_info.DirectMethod(),
1491                                original_type);
1492  }
1493  LIR* call_insn = GenCallInsn(method_info);
1494  MarkSafepointPC(call_insn);
1495
1496  FreeCallTemps();
1497  if (info->result.location != kLocInvalid) {
1498    // We have a following MOVE_RESULT - do it now.
1499    if (info->result.wide) {
1500      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1501      StoreValueWide(info->result, ret_loc);
1502    } else {
1503      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1504      StoreValue(info->result, ret_loc);
1505    }
1506  }
1507}
1508
1509}  // namespace art
1510