gen_invoke.cc revision 5030d3ee8c6fe10394912ede107cbc8df63b7b16
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
21#include "dex_file-inl.h"
22#include "entrypoints/quick/quick_entrypoints.h"
23#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/class-inl.h"
26#include "mirror/dex_cache.h"
27#include "mirror/object_array-inl.h"
28#include "mirror/reference-inl.h"
29#include "mirror/string.h"
30#include "mir_to_lir-inl.h"
31#include "scoped_thread_state_change.h"
32#include "x86/codegen_x86.h"
33
34namespace art {
35
36// Shortcuts to repeatedly used long types.
37typedef mirror::ObjectArray<mirror::Object> ObjArray;
38
39/*
40 * This source file contains "gen" codegen routines that should
41 * be applicable to most targets.  Only mid-level support utilities
42 * and "op" calls may be used here.
43 */
44
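// Registers a slow path for an inlined intrinsic: 'branch' is the fast-path exit that jumps to the
// slow path, and 'resume' (if non-null) is where the slow path rejoins the main code stream. The
// slow path simply re-issues the call without inlining.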
45void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
46  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
47   public:
48    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
49        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
50    }
51
52    void Compile() {
53      m2l_->ResetRegPool();
54      m2l_->ResetDefTracking();
55      GenerateTargetLabel(kPseudoIntrinsicRetry);
56      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
57      m2l_->GenInvokeNoInline(info_);
58      if (cont_ != nullptr) {
59        m2l_->OpUnconditionalBranch(cont_);
60      }
61    }
62
63   private:
64    CallInfo* const info_;
65  };
66
67  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
68}
69
70// Macro to help instantiate.
71// TODO: This might be used to only instantiate <4> on pure 32b systems.
72#define INSTANTIATE(sig_part1, ...) \
73  template sig_part1(ThreadOffset<4>, __VA_ARGS__); \
74  template sig_part1(ThreadOffset<8>, __VA_ARGS__); \
75
76
77/*
78 * To save scheduling time, helper calls are broken into two parts: generation of
79 * the helper target address, and the actual call to the helper.  Because x86
80 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
81 * load arguments between the two parts.
82 */
83// template <size_t pointer_size>
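// Part 1 of a helper call: returns the register holding the helper's entry point, or an invalid
// register on x86/x86-64, where part 2 calls directly through thread-local memory.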
84RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
85  // All CallRuntimeHelperXXX call this first. So make a central check here.
86  DCHECK_EQ(4U, GetInstructionSetPointerSize(cu_->instruction_set));
87
88  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
89    return RegStorage::InvalidReg();
90  } else {
91    return LoadHelper(helper_offset);
92  }
93}
94
95RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<8> helper_offset) {
96  // All CallRuntimeHelperXXX call this first. So make a central check here.
97  DCHECK_EQ(8U, GetInstructionSetPointerSize(cu_->instruction_set));
98
99  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
100    return RegStorage::InvalidReg();
101  } else {
102    return LoadHelper(helper_offset);
103  }
104}
105
106/* NOTE: if r_tgt is a temp, it will be freed following use */
107template <size_t pointer_size>
108LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset,
109                         bool safepoint_pc, bool use_link) {
110  LIR* call_inst;
111  OpKind op = use_link ? kOpBlx : kOpBx;
112  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
113    call_inst = OpThreadMem(op, helper_offset);
114  } else {
115    call_inst = OpReg(op, r_tgt);
116    FreeTemp(r_tgt);
117  }
118  if (safepoint_pc) {
119    MarkSafepointPC(call_inst);
120  }
121  return call_inst;
122}
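// Explicit instantiations for the 4-byte and 8-byte ThreadOffset variants, so the template
// definition can stay in this translation unit.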
123template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset,
124                                        bool safepoint_pc, bool use_link);
125template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<8> helper_offset,
126                                        bool safepoint_pc, bool use_link);
127
128template <size_t pointer_size>
129void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc) {
130  RegStorage r_tgt = CallHelperSetup(helper_offset);
131  ClobberCallerSave();
132  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
133}
134INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
135
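// The CallRuntimeHelperXXX variants below all follow the same pattern: materialize the arguments
// into the target's fixed argument registers (kArg0/kArg1/... or kFArg0/... for FP values),
// clobber the caller-save registers, then emit the call to the helper.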
136template <size_t pointer_size>
137void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
138                                   bool safepoint_pc) {
139  RegStorage r_tgt = CallHelperSetup(helper_offset);
140  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
141  ClobberCallerSave();
142  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
143}
144INSTANTIATE(void Mir2Lir::CallRuntimeHelperImm, int arg0, bool safepoint_pc)
145
146template <size_t pointer_size>
147void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
148                                   bool safepoint_pc) {
149  RegStorage r_tgt = CallHelperSetup(helper_offset);
150  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
151  ClobberCallerSave();
152  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
153}
154INSTANTIATE(void Mir2Lir::CallRuntimeHelperReg, RegStorage arg0, bool safepoint_pc)
155
156template <size_t pointer_size>
157void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset,
158                                           RegLocation arg0, bool safepoint_pc) {
159  RegStorage r_tgt = CallHelperSetup(helper_offset);
160  if (arg0.wide == 0) {
161    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
162  } else {
163    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
164  }
165  ClobberCallerSave();
166  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
167}
168INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocation, RegLocation arg0, bool safepoint_pc)
169
170template <size_t pointer_size>
171void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
172                                      bool safepoint_pc) {
173  RegStorage r_tgt = CallHelperSetup(helper_offset);
174  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
175  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
176  ClobberCallerSave();
177  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
178}
179INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmImm, int arg0, int arg1, bool safepoint_pc)
180
181template <size_t pointer_size>
182void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
183                                              RegLocation arg1, bool safepoint_pc) {
184  RegStorage r_tgt = CallHelperSetup(helper_offset);
185  DCHECK(!arg1.fp);
186  if (arg1.wide == 0) {
187    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
188  } else {
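    // MIPS (o32) requires wide values to start at an even register pair; with kArg0 holding the
    // immediate, the wide argument therefore starts at kArg2. Other targets use a wide kArg1.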
189    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
190    LoadValueDirectWideFixed(arg1, r_tmp);
191  }
192  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
193  ClobberCallerSave();
194  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
195}
196INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocation, int arg0, RegLocation arg1,
197            bool safepoint_pc)
198
199template <size_t pointer_size>
200void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
201                                              RegLocation arg0, int arg1, bool safepoint_pc) {
202  RegStorage r_tgt = CallHelperSetup(helper_offset);
203  DCHECK(!arg0.wide);
204  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
205  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
206  ClobberCallerSave();
207  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
208}
209INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationImm, RegLocation arg0, int arg1,
210            bool safepoint_pc)
211
212template <size_t pointer_size>
213void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
214                                      RegStorage arg1, bool safepoint_pc) {
215  RegStorage r_tgt = CallHelperSetup(helper_offset);
216  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
217  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
218  ClobberCallerSave();
219  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
220}
221INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmReg, int arg0, RegStorage arg1, bool safepoint_pc)
222
223template <size_t pointer_size>
224void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
225                                      int arg1, bool safepoint_pc) {
226  RegStorage r_tgt = CallHelperSetup(helper_offset);
227  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
228  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
229  ClobberCallerSave();
230  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
231}
232INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegImm, RegStorage arg0, int arg1, bool safepoint_pc)
233
234template <size_t pointer_size>
235void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
236                                         bool safepoint_pc) {
237  RegStorage r_tgt = CallHelperSetup(helper_offset);
238  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
239  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
240  ClobberCallerSave();
241  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
242}
243INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethod, int arg0, bool safepoint_pc)
244
245template <size_t pointer_size>
246void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
247                                         bool safepoint_pc) {
248  RegStorage r_tgt = CallHelperSetup(helper_offset);
249  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
250  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
251  if (r_tmp.NotExactlyEquals(arg0)) {
252    OpRegCopy(r_tmp, arg0);
253  }
254  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
255  ClobberCallerSave();
256  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
257}
258INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethod, RegStorage arg0, bool safepoint_pc)
259
260template <size_t pointer_size>
261void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
262                                                    RegStorage arg0, RegLocation arg2,
263                                                    bool safepoint_pc) {
264  RegStorage r_tgt = CallHelperSetup(helper_offset);
265  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
266  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
267  if (r_tmp.NotExactlyEquals(arg0)) {
268    OpRegCopy(r_tmp, arg0);
269  }
270  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
271  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
272  ClobberCallerSave();
273  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
274}
275INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethodRegLocation, RegStorage arg0, RegLocation arg2,
276            bool safepoint_pc)
277
278template <size_t pointer_size>
279void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
280                                                      RegLocation arg0, RegLocation arg1,
281                                                      bool safepoint_pc) {
282  RegStorage r_tgt = CallHelperSetup(helper_offset);
283  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
284    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
285
286    RegStorage arg1_reg;
287    if (arg1.fp == arg0.fp) {
288      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
289    } else {
290      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
291    }
292
293    if (arg0.wide == 0) {
294      LoadValueDirectFixed(arg0, arg0_reg);
295    } else {
296      LoadValueDirectWideFixed(arg0, arg0_reg);
297    }
298
299    if (arg1.wide == 0) {
300      LoadValueDirectFixed(arg1, arg1_reg);
301    } else {
302      LoadValueDirectWideFixed(arg1, arg1_reg);
303    }
304  } else {
305    DCHECK(!cu_->target64);
306    if (arg0.wide == 0) {
307      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
308      if (arg1.wide == 0) {
309        if (cu_->instruction_set == kMips) {
310          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
311        } else {
312          LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide));
313        }
314      } else {
315        if (cu_->instruction_set == kMips) {
316          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
317        } else {
318          LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide));
319        }
320      }
321    } else {
322      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
323      if (arg1.wide == 0) {
324        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
325      } else {
326        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
327      }
328    }
329  }
330  ClobberCallerSave();
331  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
332}
333INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
334            RegLocation arg1, bool safepoint_pc)
335
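// Copies two values into the kArg0/kArg1 argument registers, handling the cases where the sources
// already overlap the destinations (using kArg2 as scratch when a full swap is needed).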
336void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
337  WideKind arg0_kind = arg0.GetWideKind();
338  WideKind arg1_kind = arg1.GetWideKind();
339  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
340    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
341      // Swap kArg0 and kArg1 with kArg2 as temp.
342      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
343      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
344      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
345    } else {
346      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
347      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
348    }
349  } else {
350    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
351    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
352  }
353}
354
355template <size_t pointer_size>
356void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
357                                      RegStorage arg1, bool safepoint_pc) {
358  RegStorage r_tgt = CallHelperSetup(helper_offset);
359  CopyToArgumentRegs(arg0, arg1);
360  ClobberCallerSave();
361  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
362}
363INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegReg, RegStorage arg0, RegStorage arg1,
364            bool safepoint_pc)
365
366template <size_t pointer_size>
367void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
368                                         RegStorage arg1, int arg2, bool safepoint_pc) {
369  RegStorage r_tgt = CallHelperSetup(helper_offset);
370  CopyToArgumentRegs(arg0, arg1);
371  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
372  ClobberCallerSave();
373  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
374}
375INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegRegImm, RegStorage arg0, RegStorage arg1, int arg2,
376            bool safepoint_pc)
377
378template <size_t pointer_size>
379void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
380                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
381  RegStorage r_tgt = CallHelperSetup(helper_offset);
382  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
383  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
384  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
385  ClobberCallerSave();
386  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
387}
388INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodRegLocation, int arg0, RegLocation arg2,
389            bool safepoint_pc)
390
391template <size_t pointer_size>
392void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
393                                            int arg2, bool safepoint_pc) {
394  RegStorage r_tgt = CallHelperSetup(helper_offset);
395  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
396  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
397  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
398  ClobberCallerSave();
399  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
400}
401INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodImm, int arg0, int arg2, bool safepoint_pc)
402
403template <size_t pointer_size>
404void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
405                                                         int arg0, RegLocation arg1,
406                                                         RegLocation arg2, bool safepoint_pc) {
407  RegStorage r_tgt = CallHelperSetup(helper_offset);
408  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
409                                                        // instantiation bug in GCC.
410  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
411  if (arg2.wide == 0) {
412    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
413  } else {
414    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
415  }
416  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
417  ClobberCallerSave();
418  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
419}
420INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg1,
421            RegLocation arg2, bool safepoint_pc)
422
423template <size_t pointer_size>
424void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
425    ThreadOffset<pointer_size> helper_offset,
426    RegLocation arg0,
427    RegLocation arg1,
428    RegLocation arg2,
429    bool safepoint_pc) {
430  RegStorage r_tgt = CallHelperSetup(helper_offset);
431  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
432  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
433  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
434  ClobberCallerSave();
435  CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
436}
437INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation, RegLocation arg0,
438            RegLocation arg1, RegLocation arg2, bool safepoint_pc)
439
440/*
441 * If there are any ins passed in registers that have not been promoted
442 * to a callee-save register, flush them to the frame.  Perform initial
443 * assignment of promoted arguments.
444 *
445 * ArgLocs is an array of location records describing the incoming arguments
446 * with one location record per word of argument.
447 */
448void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
449  /*
450   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>.
451   * It will attempt to keep kArg0 live (or copy it to home location
452   * if promoted).
453   */
454  RegLocation rl_src = rl_method;
455  rl_src.location = kLocPhysReg;
456  rl_src.reg = TargetReg(kArg0, kRef);
457  rl_src.home = false;
458  MarkLive(rl_src);
459  StoreValue(rl_method, rl_src);
460  // If Method* has been promoted, explicitly flush
461  if (rl_method.location == kLocPhysReg) {
462    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
463  }
464
465  if (cu_->num_ins == 0) {
466    return;
467  }
468
469  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
470  /*
471   * Copy incoming arguments to their proper home locations.
472   * NOTE: an older version of dx had an issue in which
473   * it would reuse static method argument registers.
474   * This could result in the same Dalvik virtual register
475   * being promoted to both core and fp regs. To account for this,
476   * we only copy to the corresponding promoted physical register
477   * if it matches the type of the SSA name for the incoming
478   * argument.  It is also possible that long and double arguments
479   * end up half-promoted.  In those cases, we must flush the promoted
480   * half to memory as well.
481   */
482  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
483  for (int i = 0; i < cu_->num_ins; i++) {
484    PromotionMap* v_map = &promotion_map_[start_vreg + i];
485    RegStorage reg = GetArgMappingToPhysicalReg(i);
486
487    if (reg.Valid()) {
488      // If arriving in register
489      bool need_flush = true;
490      RegLocation* t_loc = &ArgLocs[i];
491      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
492        OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
493        need_flush = false;
494      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
495        OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
496        need_flush = false;
497      } else {
498        need_flush = true;
499      }
500
501      // For wide args, force flush if not fully promoted
502      if (t_loc->wide) {
503        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
504        // Is only half promoted?
505        need_flush |= (p_map->core_location != v_map->core_location) ||
506            (p_map->fp_location != v_map->fp_location);
507        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
508          /*
509           * In Arm, a double is represented as a pair of consecutive single float
510           * registers starting at an even number.  It's possible that both Dalvik vRegs
511           * representing the incoming double were independently promoted as singles - but
512           * not in a form usable as a double.  If so, we need to flush - even though the
513           * incoming arg appears fully in register.  At this point in the code, both
514           * halves of the double are promoted.  Make sure they are in a usable form.
515           */
516          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
517          int low_reg = promotion_map_[lowreg_index].fp_reg;
518          int high_reg = promotion_map_[lowreg_index + 1].fp_reg;
519          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
520            need_flush = true;
521          }
522        }
523      }
524      if (need_flush) {
525        Store32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg);
526      }
527    } else {
528      // If arriving in frame & promoted
529      if (v_map->core_location == kLocPhysReg) {
530        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
531                   RegStorage::Solo32(v_map->core_reg));
532      }
533      if (v_map->fp_location == kLocPhysReg) {
534        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
535                   RegStorage::Solo32(v_map->fp_reg));
536      }
537    }
538  }
539}
540
541static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
542  RegLocation rl_arg = info->args[0];
543  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
544}
545
546static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
547  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
548  // get this->klass_ [use kArg1, set kArg0]
549  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
550                  cg->TargetReg(kArg0, kRef),
551                  kNotVolatile);
552  cg->MarkPossibleNullPointerException(info->opt_flags);
553}
554
555static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
556                                                       const RegStorage* alt_from,
557                                                       const CompilationUnit* cu, Mir2Lir* cg) {
558  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
559    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
560    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
561                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
562                     cg->TargetPtrReg(kInvokeTgt));
563    return true;
564  }
565  return false;
566}
567
568/*
569 * Bit of a hack here - in the absence of a real scheduling pass,
570 * emit the next instruction in static & direct invoke sequences.
571 */
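// Each call returns the next state (or -1 once the sequence is complete); the caller interleaves
// these steps with argument loading so the invoke set-up overlaps argument marshalling.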
572static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
573                          int state, const MethodReference& target_method,
574                          uint32_t unused,
575                          uintptr_t direct_code, uintptr_t direct_method,
576                          InvokeType type) {
577  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
578  if (direct_code != 0 && direct_method != 0) {
579    switch (state) {
580    case 0:  // Get the current Method* [sets kArg0]
581      if (direct_code != static_cast<uintptr_t>(-1)) {
582        if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
583          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
584        }
585      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
586        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
587      }
588      if (direct_method != static_cast<uintptr_t>(-1)) {
589        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
590      } else {
591        cg->LoadMethodAddress(target_method, type, kArg0);
592      }
593      break;
594    default:
595      return -1;
596    }
597  } else {
598    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
599    switch (state) {
600    case 0:  // Get the current Method* [sets kArg0]
601      // TUNING: we can save a reg copy if Method* has been promoted.
602      cg->LoadCurrMethodDirect(arg0_ref);
603      break;
604    case 1:  // Get method->dex_cache_resolved_methods_
605      cg->LoadRefDisp(arg0_ref,
606                      mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
607                      arg0_ref,
608                      kNotVolatile);
609      // Set up direct code if known.
610      if (direct_code != 0) {
611        if (direct_code != static_cast<uintptr_t>(-1)) {
612          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
613        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
614          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
615          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
616        }
617      }
618      break;
619    case 2:  // Grab target method*
620      CHECK_EQ(cu->dex_file, target_method.dex_file);
621      cg->LoadRefDisp(arg0_ref,
622                      ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
623                      arg0_ref,
624                      kNotVolatile);
625      break;
626    case 3:  // Grab the code from the method*
627      if (direct_code == 0) {
628        if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
629          break;                                    // kInvokeTgt := arg0_ref->entrypoint
630        }
631      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
632        break;
633      }
634      // Intentional fallthrough for x86
635    default:
636      return -1;
637    }
638  }
639  return state + 1;
640}
641
642/*
643 * Bit of a hack here - in the absence of a real scheduling pass,
644 * emit the next instruction in a virtual invoke sequence.
645 * We can use kLr as a temp prior to target address loading.
646 * Note also that we'll load the first argument ("this") into
647 * kArg1 here rather than the standard LoadArgRegs.
648 */
649static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
650                         int state, const MethodReference& target_method,
651                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
652                         InvokeType unused3) {
653  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
654  /*
655   * This is the fast path in which the target virtual method is
656   * fully resolved at compile time.
657   */
658  switch (state) {
659    case 0:
660      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
661      break;
662    case 1:
663      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
664                                                  // Includes a null-check.
665      break;
666    case 2: {
667      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
668      int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
669          method_idx * sizeof(mirror::Class::VTableEntry);
670      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
671      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
672      break;
673    }
674    case 3:
675      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
676        break;                                    // kInvokeTgt := kArg0->entrypoint
677      }
678      // Intentional fallthrough for X86
679    default:
680      return -1;
681  }
682  return state + 1;
683}
684
685/*
686 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
687 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
688 * more than one interface method maps to the same index. Note also that we'll load the first
689 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
690 */
691static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
692                                 const MethodReference& target_method,
693                                 uint32_t method_idx, uintptr_t unused,
694                                 uintptr_t direct_method, InvokeType unused2) {
695  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
696
697  switch (state) {
698    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
699      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
700      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
701      if (cu->instruction_set == kX86) {
702        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
703      }
704      break;
705    case 1:
706      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
707      break;
708    case 2:
709      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
710                                                  // Includes a null-check.
711      break;
712    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
713      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
714          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
715      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
716      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
717      break;
718    }
719    case 4:
720      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
721        break;                                    // kInvokeTgt := kArg0->entrypoint
722      }
723      // Intentional fallthrough for X86
724    default:
725      return -1;
726  }
727  return state + 1;
728}
729
730template <size_t pointer_size>
731static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
732                            ThreadOffset<pointer_size> trampoline, int state,
733                            const MethodReference& target_method, uint32_t method_idx) {
734  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
735  /*
736   * This handles the case in which the base method is not fully
737   * resolved at compile time; we bail to a runtime helper.
738   */
739  if (state == 0) {
740    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
741      // Load trampoline target
742      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(),
743                       cg->TargetPtrReg(kInvokeTgt));
744    }
745    // Load kArg0 with method index
746    CHECK_EQ(cu->dex_file, target_method.dex_file);
747    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
748    return 1;
749  }
750  return -1;
751}
752
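// The *CallInsnSP helpers below all defer to NextInvokeInsnSP with the matching access-check
// trampoline, picking the 4- or 8-byte ThreadOffset variant based on the target word size.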
753static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
754                                int state,
755                                const MethodReference& target_method,
756                                uint32_t unused, uintptr_t unused2,
757                                uintptr_t unused3, InvokeType unused4) {
758  if (cu->target64) {
759    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
760    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
761  } else {
762    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
763    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
764  }
765}
766
767static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
768                                const MethodReference& target_method,
769                                uint32_t unused, uintptr_t unused2,
770                                uintptr_t unused3, InvokeType unused4) {
771  if (cu->target64) {
772    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
773    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
774  } else {
775    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
776    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
777  }
778}
779
780static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
781                               const MethodReference& target_method,
782                               uint32_t unused, uintptr_t unused2,
783                               uintptr_t unused3, InvokeType unused4) {
784  if (cu->target64) {
785    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
786    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
787  } else {
788    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
789    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
790  }
791}
792
793static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
794                           const MethodReference& target_method,
795                           uint32_t unused, uintptr_t unused2,
796                           uintptr_t unused3, InvokeType unused4) {
797  if (cu->target64) {
798    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
799        pInvokeVirtualTrampolineWithAccessCheck);
800    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
801  } else {
802    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
803        pInvokeVirtualTrampolineWithAccessCheck);
804    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
805  }
806}
807
808static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
809                                                CallInfo* info, int state,
810                                                const MethodReference& target_method,
811                                                uint32_t unused, uintptr_t unused2,
812                                                uintptr_t unused3, InvokeType unused4) {
813  if (cu->target64) {
814    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
815        pInvokeInterfaceTrampolineWithAccessCheck);
816    return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
817  } else {
818    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
819        pInvokeInterfaceTrampolineWithAccessCheck);
820    return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
821  }
822}
823
824int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
825                         NextCallInsn next_call_insn,
826                         const MethodReference& target_method,
827                         uint32_t vtable_idx, uintptr_t direct_code,
828                         uintptr_t direct_method, InvokeType type, bool skip_this) {
829  int last_arg_reg = 3 - 1;
830  int arg_regs[3] = {TargetReg(kArg1, kNotWide).GetReg(), TargetReg(kArg2, kNotWide).GetReg(),
831                     TargetReg(kArg3, kNotWide).GetReg()};
832
833  int next_reg = 0;
834  int next_arg = 0;
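  // When skip_this is set, the first argument ("this") has already been loaded into kArg1 by the
  // invoke sequence, so both the register cursor and the argument cursor start one slot later.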
835  if (skip_this) {
836    next_reg++;
837    next_arg++;
838  }
839  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
840    RegLocation rl_arg = info->args[next_arg++];
841    rl_arg = UpdateRawLoc(rl_arg);
842    if (rl_arg.wide && (next_reg <= last_arg_reg - 1)) {
843      RegStorage r_tmp(RegStorage::k64BitPair, arg_regs[next_reg], arg_regs[next_reg + 1]);
844      LoadValueDirectWideFixed(rl_arg, r_tmp);
845      next_reg++;
846      next_arg++;
847    } else {
848      if (rl_arg.wide) {
849        rl_arg = NarrowRegLoc(rl_arg);
850        rl_arg.is_const = false;
851      }
852      LoadValueDirectFixed(rl_arg, RegStorage::Solo32(arg_regs[next_reg]));
853    }
854    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
855                                direct_code, direct_method, type);
856  }
857  return call_state;
858}
859
860/*
861 * Load up to 5 arguments, the first three of which will be in
862 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
863 * and as part of the load sequence, it must be replaced with
864 * the target method pointer.  Note, this may also be called
865 * for "range" variants if the number of arguments is 5 or fewer.
866 */
867int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
868                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
869                                  const MethodReference& target_method,
870                                  uint32_t vtable_idx, uintptr_t direct_code,
871                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
872  RegLocation rl_arg;
873
874  /* If no arguments, just return */
875  if (info->num_arg_words == 0)
876    return call_state;
877
878  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
879                              direct_code, direct_method, type);
880
881  DCHECK_LE(info->num_arg_words, 5);
882  if (info->num_arg_words > 3) {
883    int32_t next_use = 3;
884    // Detect special case of wide arg spanning arg3/arg4
885    RegLocation rl_use0 = info->args[0];
886    RegLocation rl_use1 = info->args[1];
887    RegLocation rl_use2 = info->args[2];
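    // This matches the case where args[2] is a wide value occupying arg words 2 and 3, i.e. its
    // low half would go in the last argument register and its high half spills to the first stack
    // slot. Wide args take two slots in info->args, hence the wide checks on args[0]/args[1].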
888    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
889      RegStorage reg;
890      // Wide spans, we need the 2nd half of uses[2].
891      rl_arg = UpdateLocWide(rl_use2);
892      if (rl_arg.location == kLocPhysReg) {
893        if (rl_arg.reg.IsPair()) {
894          reg = rl_arg.reg.GetHigh();
895        } else {
896          RegisterInfo* info = GetRegInfo(rl_arg.reg);
897          info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
898          if (info == nullptr) {
899            // NOTE: For hard float convention we won't split arguments across reg/mem.
900            UNIMPLEMENTED(FATAL) << "Needs hard float api.";
901          }
902          reg = info->GetReg();
903        }
904      } else {
905        // kArg2 & kArg3 can safely be used here
906        reg = TargetReg(kArg3, kNotWide);
907        {
908          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
909          Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
910        }
911        call_state = next_call_insn(cu_, info, call_state, target_method,
912                                    vtable_idx, direct_code, direct_method, type);
913      }
914      {
915        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
916        Store32Disp(TargetPtrReg(kSp), (next_use + 1) * 4, reg);
917      }
918      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
919                                  direct_code, direct_method, type);
920      next_use++;
921    }
922    // Loop through the rest
923    while (next_use < info->num_arg_words) {
924      RegStorage arg_reg;
925      rl_arg = info->args[next_use];
926      rl_arg = UpdateRawLoc(rl_arg);
927      if (rl_arg.location == kLocPhysReg) {
928        arg_reg = rl_arg.reg;
929      } else {
930        arg_reg = TargetReg(kArg2, rl_arg.wide ? kWide : kNotWide);
931        if (rl_arg.wide) {
932          LoadValueDirectWideFixed(rl_arg, arg_reg);
933        } else {
934          LoadValueDirectFixed(rl_arg, arg_reg);
935        }
936        call_state = next_call_insn(cu_, info, call_state, target_method,
937                                    vtable_idx, direct_code, direct_method, type);
938      }
939      int outs_offset = (next_use + 1) * 4;
940      {
941        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
942        if (rl_arg.wide) {
943          StoreBaseDisp(TargetPtrReg(kSp), outs_offset, arg_reg, k64, kNotVolatile);
944          next_use += 2;
945        } else {
946          Store32Disp(TargetPtrReg(kSp), outs_offset, arg_reg);
947          next_use++;
948        }
949      }
950      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
951                               direct_code, direct_method, type);
952    }
953  }
954
955  call_state = LoadArgRegs(info, call_state, next_call_insn,
956                           target_method, vtable_idx, direct_code, direct_method,
957                           type, skip_this);
958
959  if (pcrLabel) {
960    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
961      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
962    } else {
963      *pcrLabel = nullptr;
964      if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
965          (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
966        return call_state;
967      }
968      // In lieu of generating a check for kArg1 being null, we need to
969      // perform a load when doing implicit checks.
970      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
971    }
972  }
973  return call_state;
974}
975
976// Default implementation of implicit null pointer check.
977// Overridden by arch specific as necessary.
978void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
979  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
980    return;
981  }
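  // Dereference the register: if it is null, the resulting fault is caught and converted into a
  // NullPointerException by the runtime's fault handler.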
982  RegStorage tmp = AllocTemp();
983  Load32Disp(reg, 0, tmp);
984  MarkPossibleNullPointerException(opt_flags);
985  FreeTemp(tmp);
986}
987
988
989/*
990 * May have 0+ arguments (also used for jumbo).  Note that
991 * source virtual registers may be in physical registers, so may
992 * need to be flushed to home location before copying.  This
993 * applies to arg3 and above (see below).
994 *
995 * Two general strategies:
996 *    If < 20 arguments
997 *       Pass args 3-18 using vldm/vstm block copy
998 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
999 *    If 20+ arguments
1000 *       Pass args arg19+ using memcpy block copy
1001 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
1002 *
1003 */
1004int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
1005                                LIR** pcrLabel, NextCallInsn next_call_insn,
1006                                const MethodReference& target_method,
1007                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
1008                                InvokeType type, bool skip_this) {
1009  // If we can treat it as non-range (Jumbo ops will use range form)
1010  if (info->num_arg_words <= 5)
1011    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
1012                                next_call_insn, target_method, vtable_idx,
1013                                direct_code, direct_method, type, skip_this);
1014  /*
1015   * First load the non-register arguments.  Both forms expect all
1016   * of the source arguments to be in their home frame location, so
1017   * scan the s_reg names and flush any that have been promoted to
1018   * physical registers back to frame backing storage.
1019   */
1020  // Scan the rest of the args - if in phys_reg flush to memory
1021  for (int next_arg = 0; next_arg < info->num_arg_words;) {
1022    RegLocation loc = info->args[next_arg];
1023    if (loc.wide) {
1024      loc = UpdateLocWide(loc);
1025      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
1026        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1027        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
1028      }
1029      next_arg += 2;
1030    } else {
1031      loc = UpdateLoc(loc);
1032      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
1033        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1034        Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
1035      }
1036      next_arg++;
1037    }
1038  }
1039
1040  // Logic below assumes that Method pointer is at offset zero from SP.
1041  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
1042
1043  // The first 3 arguments are passed via registers.
1044  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
1045  // get size of uintptr_t or size of object reference according to model being used.
1046  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
1047  int start_offset = SRegOffset(info->args[3].s_reg_low);
1048  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
1049  DCHECK_GT(regs_left_to_pass_via_stack, 0);
1050
1051  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
1052    // Use vldm/vstm pair using kArg3 as a temp
1053    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1054                             direct_code, direct_method, type);
1055    OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), start_offset);
1056    LIR* ld = nullptr;
1057    {
1058      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1059      ld = OpVldm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
1060    }
1061    // TUNING: loosen barrier
1062    ld->u.m.def_mask = &kEncodeAll;
1063    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1064                             direct_code, direct_method, type);
1065    OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4));
1066    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1067                             direct_code, direct_method, type);
1068    LIR* st = nullptr;
1069    {
1070      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1071      st = OpVstm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
1072    }
1073    st->u.m.def_mask = &kEncodeAll;
1074    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1075                             direct_code, direct_method, type);
1076  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
1077    int current_src_offset = start_offset;
1078    int current_dest_offset = outs_offset;
1079
1080    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
1081    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
1082    while (regs_left_to_pass_via_stack > 0) {
1083      // This is based on the knowledge that the stack itself is 16-byte aligned.
1084      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
1085      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
1086      size_t bytes_to_move;
1087
1088      /*
1089       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
1090       * a 128-bit move because we won't get the chance to try to align. If there are more than
1091       * 4 registers left to move, consider a 128-bit move only if either src or dest is aligned.
1092       * We do this because we could potentially do a smaller move to align.
1093       */
1094      if (regs_left_to_pass_via_stack == 4 ||
1095          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
1096        // Moving 128-bits via xmm register.
1097        bytes_to_move = sizeof(uint32_t) * 4;
1098
1099        // Allocate a free xmm temp. Since we are working through the calling sequence,
1100        // we expect to have an xmm temporary available.  AllocTempDouble will abort if
1101        // there are no free registers.
1102        RegStorage temp = AllocTempDouble();
1103
1104        LIR* ld1 = nullptr;
1105        LIR* ld2 = nullptr;
1106        LIR* st1 = nullptr;
1107        LIR* st2 = nullptr;
1108
1109        /*
1110         * The logic is similar for both loads and stores. If we have 16-byte alignment,
1111         * do an aligned move. If we have 8-byte alignment, then do the move in two
1112         * parts. This approach prevents possible cache line splits. Finally, fall back
1113         * to doing an unaligned move. In most cases we likely won't split the cache
1114         * line but we cannot prove it and thus take a conservative approach.
1115         */
1116        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
1117        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
1118
1119        if (src_is_16b_aligned) {
1120          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP);
1121        } else if (src_is_8b_aligned) {
1122          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP);
1123          ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1),
1124                            kMovHi128FP);
1125        } else {
1126          ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP);
1127        }
1128
1129        if (dest_is_16b_aligned) {
1130          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP);
1131        } else if (dest_is_8b_aligned) {
1132          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP);
1133          st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1),
1134                            temp, kMovHi128FP);
1135        } else {
1136          st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP);
1137        }
1138
1139        // TODO If we could keep track of aliasing information for memory accesses that are wider
1140        // than 64-bit, we wouldn't need to set up a barrier.
1141        if (ld1 != nullptr) {
1142          if (ld2 != nullptr) {
1143            // For 64-bit load we can actually set up the aliasing information.
1144            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
1145            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
1146                                    true);
1147          } else {
1148            // Set barrier for 128-bit load.
1149            ld1->u.m.def_mask = &kEncodeAll;
1150          }
1151        }
1152        if (st1 != nullptr) {
1153          if (st2 != nullptr) {
1154            // For 64-bit store we can actually set up the aliasing information.
1155            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
1156            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
1157                                    true);
1158          } else {
1159            // Set barrier for 128-bit store.
1160            st1->u.m.def_mask = &kEncodeAll;
1161          }
1162        }
1163
1164        // Free the temporary used for the data movement.
1165        FreeTemp(temp);
1166      } else {
1167        // Moving 32-bits via general purpose register.
1168        bytes_to_move = sizeof(uint32_t);
1169
1170        // Instead of allocating a new temp, simply reuse one of the registers being used
1171        // for argument passing.
1172        RegStorage temp = TargetReg(kArg3, kNotWide);
1173
1174        // Now load the argument VR and store to the outs.
1175        Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
1176        Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
1177      }
1178
1179      current_src_offset += bytes_to_move;
1180      current_dest_offset += bytes_to_move;
1181      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
1182    }
1183  } else {
1184    // Generate memcpy
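    // kArg0 = destination (the outs area), kArg1 = source (the home frame locations); the length
    // is the number of remaining argument words times 4 bytes.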
1185    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
1186    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
1187    if (cu_->target64) {
1188      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef),
1189                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
1190    } else {
1191      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef),
1192                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
1193    }
1194  }
1195
1196  call_state = LoadArgRegs(info, call_state, next_call_insn,
1197                           target_method, vtable_idx, direct_code, direct_method,
1198                           type, skip_this);
1199
1200  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
1201                           direct_code, direct_method, type);
1202  if (pcrLabel) {
1203    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
1204      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
1205    } else {
1206      *pcrLabel = nullptr;
1207      if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
1208          (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
1209        return call_state;
1210      }
1211      // In lieu of generating a check for kArg1 being null, we need to
1212      // perform a load when doing implicit checks.
1213      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
1214    }
1215  }
1216  return call_state;
1217}
1218
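// Returns the location an inlined intrinsic should write its result to: the call's own result
// location if it has one, otherwise the standard return location.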
1219RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
1220  RegLocation res;
1221  if (info->result.location == kLocInvalid) {
1222    res = GetReturn(LocToRegClass(info->result));
1223  } else {
1224    res = info->result;
1225  }
1226  return res;
1227}
1228
1229RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
1230  RegLocation res;
1231  if (info->result.location == kLocInvalid) {
1232    res = GetReturnWide(kCoreReg);
1233  } else {
1234    res = info->result;
1235  }
1236  return res;
1237}
1238
1239bool Mir2Lir::GenInlinedGet(CallInfo* info) {
1240  if (cu_->instruction_set == kMips) {
1241    // TODO - add Mips implementation
1242    return false;
1243  }
1244
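  // Fast path for Reference.get(): unless the reference class's slow-path or disable-intrinsic
  // flag is set, load the referent field directly instead of making the call.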
1245  // The reference class is stored in the image dex file, which might not be the same as the cu's
1246  // dex file. Query the reference class for its (image) dex file, then reset to the original dex
1247  // file after loading the class type.
1248  uint16_t type_idx = 0;
1249  const DexFile* ref_dex_file = nullptr;
1250  {
1251    ScopedObjectAccess soa(Thread::Current());
1252    type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
1253    ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
1254  }
1255  CHECK(LIKELY(ref_dex_file != nullptr));
1256
1257  // The address is either static within the image file or needs to be patched after compilation.
1258  bool unused_type_initialized;
1259  bool use_direct_type_ptr;
1260  uintptr_t direct_type_ptr;
1261  bool is_finalizable;
1262  const DexFile* old_dex = cu_->dex_file;
1263  cu_->dex_file = ref_dex_file;
1264  RegStorage reg_class = TargetReg(kArg1, kRef);
1265  Clobber(reg_class);
1266  LockTemp(reg_class);
1267  if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
1268                                                &use_direct_type_ptr, &direct_type_ptr,
1269                                                &is_finalizable) || is_finalizable) {
1270    cu_->dex_file = old_dex;
1271    // The address is not known and cannot be patched post-compile, so do not inline the intrinsic.
1272    return false;
1273  }
1274  if (use_direct_type_ptr) {
1275    LoadConstant(reg_class, direct_type_ptr);
1276  } else {
1277    LoadClassType(type_idx, kArg1);
1278  }
1279  cu_->dex_file = old_dex;
1280
1281  // Get the offsets of the flags in the Reference class.
1282  uint32_t slow_path_flag_offset = 0;
1283  uint32_t disable_flag_offset = 0;
1284  {
1285    ScopedObjectAccess soa(Thread::Current());
1286    mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
1287    slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
1288    disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
1289  }
1290  CHECK(slow_path_flag_offset && disable_flag_offset &&
1291        (slow_path_flag_offset != disable_flag_offset));
1292
1293  // Intrinsic logic starts here.
1294  RegLocation rl_obj = info->args[0];
1295  rl_obj = LoadValue(rl_obj);
1296
1297  RegStorage reg_slow_path = AllocTemp();
1298  RegStorage reg_disabled = AllocTemp();
1299  Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
1300  Load32Disp(reg_class, disable_flag_offset, reg_disabled);
1301  FreeTemp(reg_class);
1302  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
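      // reg_slow_path is zero only if both the slow-path and disable-intrinsic flags are clear.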
1303  FreeTemp(reg_disabled);
1304
1305  // If the slow path is required, jump to the JNI path target.
1306  LIR* slow_path_branch;
1307  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
1308    // Generate only a conditional branch; the OR already set the flags (we want the 'Z' flag).
1309    slow_path_branch = OpCondBranch(kCondNe, nullptr);
1310  } else {
1311    // Generate compare and branch.
1312    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
1313  }
1314  FreeTemp(reg_slow_path);
1315
1316  // Slow path not enabled; simply load the referent of the reference object.
1317  RegLocation rl_dest = InlineTarget(info);
1318  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1319  GenNullCheck(rl_obj.reg, info->opt_flags);
1320  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
1321      kNotVolatile);
1322  MarkPossibleNullPointerException(info->opt_flags);
1323  StoreValue(rl_dest, rl_result);
1324
1325  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
1326  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
1327
1328  return true;
1329}
1330
1331bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
1332  if (cu_->instruction_set == kMips) {
1333    // TODO - add Mips implementation
1334    return false;
1335  }
1336  // Location of reference to data array
1337  int value_offset = mirror::String::ValueOffset().Int32Value();
1338  // Location of count
1339  int count_offset = mirror::String::CountOffset().Int32Value();
1340  // Starting offset within data array
1341  int offset_offset = mirror::String::OffsetOffset().Int32Value();
1342  // Start of char data within array_
1343  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
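      // The char at the requested index lives at: value array + data_offset + (offset + index) * 2.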
1344
1345  RegLocation rl_obj = info->args[0];
1346  RegLocation rl_idx = info->args[1];
1347  rl_obj = LoadValue(rl_obj, kRefReg);
1348  // X86 wants to avoid putting a constant index into a register.
1349  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
1350    rl_idx = LoadValue(rl_idx, kCoreReg);
1351  }
1352  RegStorage reg_max;
1353  GenNullCheck(rl_obj.reg, info->opt_flags);
1354  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1355  LIR* range_check_branch = nullptr;
1356  RegStorage reg_off;
1357  RegStorage reg_ptr;
1358  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1359    reg_off = AllocTemp();
1360    reg_ptr = AllocTempRef();
1361    if (range_check) {
1362      reg_max = AllocTemp();
1363      Load32Disp(rl_obj.reg, count_offset, reg_max);
1364      MarkPossibleNullPointerException(info->opt_flags);
1365    }
1366    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1367    MarkPossibleNullPointerException(info->opt_flags);
1368    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1369    if (range_check) {
1370      // Set up a slow path to allow retry in case of bounds violation.
1371      OpRegReg(kOpCmp, rl_idx.reg, reg_max);
1372      FreeTemp(reg_max);
1373      range_check_branch = OpCondBranch(kCondUge, nullptr);
1374    }
1375    OpRegImm(kOpAdd, reg_ptr, data_offset);
1376  } else {
1377    if (range_check) {
1378      // On x86, we can compare to memory directly.
1379      // Set up a slow path to allow retry in case of bounds violation.
1380      if (rl_idx.is_const) {
1381        LIR* comparison;
1382        range_check_branch = OpCmpMemImmBranch(
1383            kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
1384            mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
1385        MarkPossibleNullPointerExceptionAfter(0, comparison);
1386      } else {
1387        OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
1388        MarkPossibleNullPointerException(0);
1389        range_check_branch = OpCondBranch(kCondUge, nullptr);
1390      }
1391    }
1392    reg_off = AllocTemp();
1393    reg_ptr = AllocTempRef();
1394    Load32Disp(rl_obj.reg, offset_offset, reg_off);
1395    LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
1396  }
1397  if (rl_idx.is_const) {
1398    OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1399  } else {
1400    OpRegReg(kOpAdd, reg_off, rl_idx.reg);
1401  }
1402  FreeTemp(rl_obj.reg);
1403  if (rl_idx.location == kLocPhysReg) {
1404    FreeTemp(rl_idx.reg);
1405  }
1406  RegLocation rl_dest = InlineTarget(info);
1407  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1408  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1409    LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
1410  } else {
1411    LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
1412  }
1413  FreeTemp(reg_off);
1414  FreeTemp(reg_ptr);
1415  StoreValue(rl_dest, rl_result);
1416  if (range_check) {
1417    DCHECK(range_check_branch != nullptr);
1418    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
1419    AddIntrinsicSlowPath(info, range_check_branch);
1420  }
1421  return true;
1422}
1423
1424// Generates an inlined String.isEmpty() or String.length().
1425bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
1426  if (cu_->instruction_set == kMips) {
1427    // TODO - add Mips implementation
1428    return false;
1429  }
1430  // dst = src.length();
1431  RegLocation rl_obj = info->args[0];
1432  rl_obj = LoadValue(rl_obj, kRefReg);
1433  RegLocation rl_dest = InlineTarget(info);
1434  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1435  GenNullCheck(rl_obj.reg, info->opt_flags);
1436  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
1437  MarkPossibleNullPointerException(info->opt_flags);
1438  if (is_empty) {
1439    // dst = (dst == 0);
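        // Branch-free (len == 0) idioms: Thumb2 uses a NEG/ADC carry trick; the other targets
        // compute (len - 1) unsigned-shifted right by 31, which is 1 only when len was zero.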
1440    if (cu_->instruction_set == kThumb2) {
1441      RegStorage t_reg = AllocTemp();
1442      OpRegReg(kOpNeg, t_reg, rl_result.reg);
1443      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
1444    } else if (cu_->instruction_set == kArm64) {
1445      OpRegImm(kOpSub, rl_result.reg, 1);
1446      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
1447    } else {
1448      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
1449      OpRegImm(kOpSub, rl_result.reg, 1);
1450      OpRegImm(kOpLsr, rl_result.reg, 31);
1451    }
1452  }
1453  StoreValue(rl_dest, rl_result);
1454  return true;
1455}
1456
1457bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1458  if (cu_->instruction_set == kMips) {
1459    // TODO - add Mips implementation.
1460    return false;
1461  }
1462  RegLocation rl_src_i = info->args[0];
1463  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1464  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1465  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1466  if (size == k64) {
1467    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1468      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1469      StoreValueWide(rl_dest, rl_result);
1470      return true;
1471    }
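        // On 32-bit cores, byte-reverse each 32-bit half into the opposite half of the result.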
1472    RegStorage r_i_low = rl_i.reg.GetLow();
1473    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1474      // First REV clobbers rl_result.reg.GetLow(); save the low half in a temp for the second REV.
1475      r_i_low = AllocTemp();
1476      OpRegCopy(r_i_low, rl_i.reg);
1477    }
1478    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1479    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1480    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1481      FreeTemp(r_i_low);
1482    }
1483    StoreValueWide(rl_dest, rl_result);
1484  } else {
1485    DCHECK(size == k32 || size == kSignedHalf);
1486    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
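        // kOpRevsh byte-swaps the low half-word and sign-extends the result.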
1487    OpRegReg(op, rl_result.reg, rl_i.reg);
1488    StoreValue(rl_dest, rl_result);
1489  }
1490  return true;
1491}
1492
1493bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1494  if (cu_->instruction_set == kMips) {
1495    // TODO - add Mips implementation
1496    return false;
1497  }
1498  RegLocation rl_src = info->args[0];
1499  rl_src = LoadValue(rl_src, kCoreReg);
1500  RegLocation rl_dest = InlineTarget(info);
1501  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1502  RegStorage sign_reg = AllocTemp();
1503  // abs(x): sign = x >> 31 (arithmetic); result = (x + sign) ^ sign.
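      // For example, x = -5: sign = 0xFFFFFFFF (-1), x + sign = -6, and -6 ^ sign = 5.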
1504  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1505  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1506  OpRegReg(kOpXor, rl_result.reg, sign_reg);
1507  StoreValue(rl_dest, rl_result);
1508  return true;
1509}
1510
1511bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1512  if (cu_->instruction_set == kMips) {
1513    // TODO - add Mips implementation
1514    return false;
1515  }
1516  RegLocation rl_src = info->args[0];
1517  rl_src = LoadValueWide(rl_src, kCoreReg);
1518  RegLocation rl_dest = InlineTargetWide(info);
1519  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1520
1521  // If on x86 or if we would clobber a register needed later, just copy the source first.
1522  if (cu_->instruction_set != kX86_64 &&
1523      (cu_->instruction_set == kX86 ||
1524       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1525    OpRegCopyWide(rl_result.reg, rl_src.reg);
1526    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1527        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1528        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1529        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1530      // Reuse source registers to avoid running out of temps.
1531      FreeTemp(rl_src.reg);
1532    }
1533    rl_src = rl_result;
1534  }
1535
1536  // abs(x): sign = x >> 63 (arithmetic); result = (x + sign) ^ sign.
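      // On 32-bit targets this is done per half, with ADC propagating the carry of the low ADD.
      // The XOR needs no carry because the sign mask is all zeros or all ones.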
1537  RegStorage sign_reg;
1538  if (cu_->instruction_set == kX86_64) {
1539    sign_reg = AllocTempWide();
1540    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1541    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1542    OpRegReg(kOpXor, rl_result.reg, sign_reg);
1543  } else {
1544    sign_reg = AllocTemp();
1545    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1546    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1547    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1548    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1549    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1550  }
1551  FreeTemp(sign_reg);
1552  StoreValueWide(rl_dest, rl_result);
1553  return true;
1554}
1555
1556bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1557  // Currently implemented only for ARM64
1558  return false;
1559}
1560
1561bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1562  // Currently implemented only for ARM64
1563  return false;
1564}
1565
1566bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1567  if (cu_->instruction_set == kMips) {
1568    // TODO - add Mips implementation
1569    return false;
1570  }
1571  RegLocation rl_src = info->args[0];
1572  RegLocation rl_dest = InlineTarget(info);
1573  StoreValue(rl_dest, rl_src);
1574  return true;
1575}
1576
1577bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1578  if (cu_->instruction_set == kMips) {
1579    // TODO - add Mips implementation
1580    return false;
1581  }
1582  RegLocation rl_src = info->args[0];
1583  RegLocation rl_dest = InlineTargetWide(info);
1584  StoreValueWide(rl_dest, rl_src);
1585  return true;
1586}
1587
1588bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1589  return false;
1590}
1591
1592
1593/*
1594 * Fast String.indexOf(I) and indexOf(II).  Handles the simple case of a code point <= 0xFFFF;
1595 * otherwise bails to the standard library implementation.
1596 */
1597bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1598  if (cu_->instruction_set == kMips) {
1599    // TODO - add Mips implementation
1600    return false;
1601  }
1602  if (cu_->instruction_set == kX86_64) {
1603    // TODO - add kX86_64 implementation
1604    return false;
1605  }
1606  RegLocation rl_obj = info->args[0];
1607  RegLocation rl_char = info->args[1];
1608  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1609    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1610    return false;
1611  }
1612
1613  ClobberCallerSave();
1614  LockCallTemps();  // Using fixed registers
1615  RegStorage reg_ptr = TargetReg(kArg0, kRef);
1616  RegStorage reg_char = TargetReg(kArg1, kNotWide);
1617  RegStorage reg_start = TargetReg(kArg2, kNotWide);
1618
1619  LoadValueDirectFixed(rl_obj, reg_ptr);
1620  LoadValueDirectFixed(rl_char, reg_char);
1621  if (zero_based) {
1622    LoadConstant(reg_start, 0);
1623  } else {
1624    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1625    LoadValueDirectFixed(rl_start, reg_start);
1626  }
1627  RegStorage r_tgt = cu_->target64 ?
1628      LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
1629      LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
1630  GenExplicitNullCheck(reg_ptr, info->opt_flags);
1631  LIR* high_code_point_branch =
1632      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1633  // NOTE: not a safepoint
1634  OpReg(kOpBlx, r_tgt);
1635  if (!rl_char.is_const) {
1636    // Add the slow path for code points beyond 0xFFFF.
1637    DCHECK(high_code_point_branch != nullptr);
1638    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1639    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1640    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1641  } else {
1642    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1643    DCHECK(high_code_point_branch == nullptr);
1644  }
1645  RegLocation rl_return = GetReturn(kCoreReg);
1646  RegLocation rl_dest = InlineTarget(info);
1647  StoreValue(rl_dest, rl_return);
1648  return true;
1649}
1650
1651/* Fast String.compareTo(Ljava/lang/String;)I. */
1652bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1653  if (cu_->instruction_set == kMips) {
1654    // TODO - add Mips implementation
1655    return false;
1656  }
1657  ClobberCallerSave();
1658  LockCallTemps();  // Using fixed registers
1659  RegStorage reg_this = TargetReg(kArg0, kRef);
1660  RegStorage reg_cmp = TargetReg(kArg1, kRef);
1661
1662  RegLocation rl_this = info->args[0];
1663  RegLocation rl_cmp = info->args[1];
1664  LoadValueDirectFixed(rl_this, reg_this);
1665  LoadValueDirectFixed(rl_cmp, reg_cmp);
1666  RegStorage r_tgt;
1667  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1668    if (cu_->target64) {
1669      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1670    } else {
1671      r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1672    }
1673  } else {
1674    r_tgt = RegStorage::InvalidReg();
1675  }
1676  GenExplicitNullCheck(reg_this, info->opt_flags);
1677  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1678  // TUNING: check if rl_cmp.s_reg_low is already null checked
1679  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1680  AddIntrinsicSlowPath(info, cmp_null_check_branch);
1681  // NOTE: not a safepoint
1682  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1683    OpReg(kOpBlx, r_tgt);
1684  } else {
1685    if (cu_->target64) {
1686      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
1687    } else {
1688      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
1689    }
1690  }
1691  RegLocation rl_return = GetReturn(kCoreReg);
1692  RegLocation rl_dest = InlineTarget(info);
1693  StoreValue(rl_dest, rl_return);
1694  return true;
1695}
1696
1697bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1698  RegLocation rl_dest = InlineTarget(info);
1699
1700  // Early exit if the result is unused.
1701  if (rl_dest.orig_sreg < 0) {
1702    return true;
1703  }
1704
1705  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1706
1707  switch (cu_->instruction_set) {
1708    case kArm:
1709      // Fall-through.
1710    case kThumb2:
1711      // Fall-through.
1712    case kMips:
1713      Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1714      break;
1715
1716    case kArm64:
1717      LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1718                  kNotVolatile);
1719      break;
1720
1721    case kX86:
1722      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1723                                                          Thread::PeerOffset<4>());
1724      break;
1725
1726    case kX86_64:
1727      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
1728                                                          Thread::PeerOffset<8>());
1729      break;
1730
1731    default:
1732      LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
1733  }
1734  StoreValue(rl_dest, rl_result);
1735  return true;
1736}
1737
1738bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1739                                  bool is_long, bool is_volatile) {
1740  if (cu_->instruction_set == kMips) {
1741    // TODO - add Mips implementation
1742    return false;
1743  }
1744  // Unused - RegLocation rl_src_unsafe = info->args[0];
1745  RegLocation rl_src_obj = info->args[1];  // Object
1746  RegLocation rl_src_offset = info->args[2];  // long low
1747  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1748  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1749
1750  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1751  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1752  RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
1753  if (is_long) {
1754    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1755        || cu_->instruction_set == kArm64) {
1756      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1757    } else {
1758      RegStorage rl_temp_offset = AllocTemp();
1759      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1760      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1761      FreeTemp(rl_temp_offset);
1762    }
1763  } else {
1764    if (rl_result.ref) {
1765      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1766    } else {
1767      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1768    }
1769  }
1770
1771  if (is_volatile) {
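        // A load-any barrier keeps later accesses from being reordered before the volatile load.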
1772    GenMemBarrier(kLoadAny);
1773  }
1774
1775  if (is_long) {
1776    StoreValueWide(rl_dest, rl_result);
1777  } else {
1778    StoreValue(rl_dest, rl_result);
1779  }
1780  return true;
1781}
1782
1783bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1784                                  bool is_object, bool is_volatile, bool is_ordered) {
1785  if (cu_->instruction_set == kMips) {
1786    // TODO - add Mips implementation
1787    return false;
1788  }
1789  // Unused - RegLocation rl_src_unsafe = info->args[0];
1790  RegLocation rl_src_obj = info->args[1];  // Object
1791  RegLocation rl_src_offset = info->args[2];  // long low
1792  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1793  RegLocation rl_src_value = info->args[4];  // value to store
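      // An any-store barrier keeps prior accesses from being reordered past the store.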
1794  if (is_volatile || is_ordered) {
1795    GenMemBarrier(kAnyStore);
1796  }
1797  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1798  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1799  RegLocation rl_value;
1800  if (is_long) {
1801    rl_value = LoadValueWide(rl_src_value, kCoreReg);
1802    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1803        || cu_->instruction_set == kArm64) {
1804      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1805    } else {
1806      RegStorage rl_temp_offset = AllocTemp();
1807      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1808      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1809      FreeTemp(rl_temp_offset);
1810    }
1811  } else {
1812    rl_value = LoadValue(rl_src_value);
1813    if (rl_value.ref) {
1814      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1815    } else {
1816      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1817    }
1818  }
1819
1820  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1821  FreeTemp(rl_offset.reg);
1822
1823  if (is_volatile) {
1824    // Prevent reordering with a subsequent volatile load.
1825    // May also be needed to address store atomicity issues.
1826    GenMemBarrier(kAnyAny);
1827  }
1828  if (is_object) {
1829    MarkGCCard(rl_value.reg, rl_object.reg);
1830  }
1831  return true;
1832}
1833
1834void Mir2Lir::GenInvoke(CallInfo* info) {
1835  if ((info->opt_flags & MIR_INLINED) != 0) {
1836    // Already inlined but we may still need the null check.
1837    if (info->type != kStatic &&
1838        ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
1839         (info->opt_flags & MIR_IGNORE_NULL_CHECK) == 0))  {
1840      RegLocation rl_obj = LoadValue(info->args[0], kRefReg);
1841      GenNullCheck(rl_obj.reg);
1842    }
1843    return;
1844  }
1845  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
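      // Try to replace the call with an intrinsic before generating a full invoke sequence.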
1846  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1847      ->GenIntrinsic(this, info)) {
1848    return;
1849  }
1850  GenInvokeNoInline(info);
1851}
1852
1853template <size_t pointer_size>
1854static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
1855  ThreadOffset<pointer_size> trampoline(-1);
1856  switch (type) {
1857    case kInterface:
1858      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeInterfaceTrampolineWithAccessCheck);
1859      break;
1860    case kDirect:
1861      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeDirectTrampolineWithAccessCheck);
1862      break;
1863    case kStatic:
1864      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeStaticTrampolineWithAccessCheck);
1865      break;
1866    case kSuper:
1867      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeSuperTrampolineWithAccessCheck);
1868      break;
1869    case kVirtual:
1870      trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeVirtualTrampolineWithAccessCheck);
1871      break;
1872    default:
1873      LOG(FATAL) << "Unexpected invoke type";
1874  }
1875  return mir_to_lir->OpThreadMem(kOpBlx, trampoline);
1876}
1877
1878void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1879  int call_state = 0;
1880  LIR* null_ck;
1881  LIR** p_null_ck = nullptr;
1882  NextCallInsn next_call_insn;
1883  FlushAllRegs();  /* Everything to home location */
1884  // Explicit register usage
1885  LockCallTemps();
1886
1887  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1888  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1889  BeginInvoke(info);
1890  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1891  info->type = static_cast<InvokeType>(method_info.GetSharpType());
1892  bool fast_path = method_info.FastPath();
1893  bool skip_this;
1894  if (info->type == kInterface) {
1895    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1896    skip_this = fast_path;
1897  } else if (info->type == kDirect) {
1898    if (fast_path) {
1899      p_null_ck = &null_ck;
1900    }
1901    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1902    skip_this = false;
1903  } else if (info->type == kStatic) {
1904    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1905    skip_this = false;
1906  } else if (info->type == kSuper) {
1907    DCHECK(!fast_path);  // Fast path is a direct call.
1908    next_call_insn = NextSuperCallInsnSP;
1909    skip_this = false;
1910  } else {
1911    DCHECK_EQ(info->type, kVirtual);
1912    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1913    skip_this = fast_path;
1914  }
1915  MethodReference target_method = method_info.GetTargetMethod();
1916  if (!info->is_range) {
1917    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1918                                      next_call_insn, target_method, method_info.VTableIndex(),
1919                                      method_info.DirectCode(), method_info.DirectMethod(),
1920                                      original_type, skip_this);
1921  } else {
1922    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1923                                    next_call_insn, target_method, method_info.VTableIndex(),
1924                                    method_info.DirectCode(), method_info.DirectMethod(),
1925                                    original_type, skip_this);
1926  }
1927  // Finish up any of the call sequence not interleaved in arg loading
1928  while (call_state >= 0) {
1929    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1930                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
1931  }
1932  LIR* call_inst;
1933  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1934    call_inst = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
1935  } else {
1936    if (fast_path) {
1937      if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
1938        // We can have the linker fixup a call relative.
1939        call_inst =
1940          reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
1941      } else {
1942        call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef),
1943                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
1944      }
1945    } else {
1946      // TODO: Extract?
1947      if (cu_->target64) {
1948        call_inst = GenInvokeNoInlineCall<8>(this, info->type);
1949      } else {
1950        call_inst = GenInvokeNoInlineCall<4>(this, info->type);
1951      }
1952    }
1953  }
1954  EndInvoke(info);
1955  MarkSafepointPC(call_inst);
1956
1957  ClobberCallerSave();
1958  if (info->result.location != kLocInvalid) {
1959    // We have a following MOVE_RESULT - do it now.
1960    if (info->result.wide) {
1961      RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
1962      StoreValueWide(info->result, ret_loc);
1963    } else {
1964      RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
1965      StoreValue(info->result, ret_loc);
1966    }
1967  }
1968}
1969
1970}  // namespace art
1971