quick_trampoline_entrypoints.cc revision 1432a5bb0608a920e7281b38ee5f6e8dfcfae5ef
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "art_method-inl.h"
18#include "base/enums.h"
19#include "callee_save_frame.h"
20#include "common_throws.h"
21#include "dex_file-inl.h"
22#include "dex_instruction-inl.h"
23#include "entrypoints/entrypoint_utils-inl.h"
24#include "entrypoints/runtime_asm_entrypoints.h"
25#include "gc/accounting/card_table-inl.h"
26#include "imt_conflict_table.h"
27#include "imtable-inl.h"
28#include "interpreter/interpreter.h"
29#include "linear_alloc.h"
30#include "method_reference.h"
31#include "mirror/class-inl.h"
32#include "mirror/dex_cache-inl.h"
33#include "mirror/method.h"
34#include "mirror/object-inl.h"
35#include "mirror/object_array-inl.h"
36#include "oat_quick_method_header.h"
37#include "quick_exception_handler.h"
38#include "runtime.h"
39#include "scoped_thread_state_change-inl.h"
40#include "stack.h"
41#include "debugger.h"
42
43namespace art {
44
45// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
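// Subclasses override Visit() and are driven by VisitArguments(), which walks the method's
// shorty and positions GetParamAddress()/GetParamPrimitiveType() on each argument in turn
// (see BuildQuickShadowFrameVisitor and BuildQuickArgumentVisitor below for concrete users).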
46class QuickArgumentVisitor {
47  // Number of bytes for each out register in the caller method's frame.
48  static constexpr size_t kBytesStackArgLocation = 4;
49  // Frame size in bytes of a callee-save frame for RefsAndArgs.
50  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
51      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
52#if defined(__arm__)
53  // The callee save frame is pointed to by SP.
54  // | argN       |  |
55  // | ...        |  |
56  // | arg4       |  |
57  // | arg3 spill |  |  Caller's frame
58  // | arg2 spill |  |
59  // | arg1 spill |  |
60  // | Method*    | ---
61  // | LR         |
62  // | ...        |    4x6 bytes callee saves
63  // | R3         |
64  // | R2         |
65  // | R1         |
66  // | S15        |
67  // | :          |
68  // | S0         |
69  // |            |    4x2 bytes padding
70  // | Method*    |  <- sp
71  static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
72  static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
73  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
74  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
75  static constexpr bool kQuickSkipOddFpRegisters = false;
76  static constexpr size_t kNumQuickGprArgs = 3;
77  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
78  static constexpr bool kGprFprLockstep = false;
79  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
80      arm::ArmCalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first FPR arg.
81  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
82      arm::ArmCalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first GPR arg.
83  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
84      arm::ArmCalleeSaveLrOffset(Runtime::kSaveRefsAndArgs);  // Offset of return address.
85  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
86    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
87  }
88#elif defined(__aarch64__)
89  // The callee save frame is pointed to by SP.
90  // | argN       |  |
91  // | ...        |  |
92  // | arg4       |  |
93  // | arg3 spill |  |  Caller's frame
94  // | arg2 spill |  |
95  // | arg1 spill |  |
96  // | Method*    | ---
97  // | LR         |
98  // | X29        |
99  // |  :         |
100  // | X20        |
101  // | X7         |
102  // | :          |
103  // | X1         |
104  // | D7         |
105  // |  :         |
106  // | D0         |
107  // |            |    padding
108  // | Method*    |  <- sp
109  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
110  static constexpr bool kAlignPairRegister = false;
111  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
112  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
113  static constexpr bool kQuickSkipOddFpRegisters = false;
114  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
115  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
116  static constexpr bool kGprFprLockstep = false;
117  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
118      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first FPR arg.
119  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
120      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first GPR arg.
121  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
122      arm64::Arm64CalleeSaveLrOffset(Runtime::kSaveRefsAndArgs);  // Offset of return address.
123  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
124    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
125  }
126#elif defined(__mips__) && !defined(__LP64__)
127  // The callee save frame is pointed to by SP.
128  // | argN       |  |
129  // | ...        |  |
130  // | arg4       |  |
131  // | arg3 spill |  |  Caller's frame
132  // | arg2 spill |  |
133  // | arg1 spill |  |
134  // | Method*    | ---
135  // | RA         |
136  // | ...        |    callee saves
137  // | A3         |    arg3
138  // | A2         |    arg2
139  // | A1         |    arg1
140  // | F15        |
141  // | F14        |    f_arg1
142  // | F13        |
143  // | F12        |    f_arg0
144  // |            |    padding
145  // | A0/Method* |  <- sp
146  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
147  static constexpr bool kAlignPairRegister = true;
148  static constexpr bool kQuickSoftFloatAbi = false;
149  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
150  static constexpr bool kQuickSkipOddFpRegisters = true;
151  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 single-precision registers holding at most
                                                 // 2 FP args: floats go only in even-numbered
                                                 // registers and each double occupies two.
155  static constexpr bool kGprFprLockstep = false;
156  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
157  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 32;  // Offset of first GPR arg.
158  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 76;  // Offset of return address.
159  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
160    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
161  }
162#elif defined(__mips__) && defined(__LP64__)
163  // The callee save frame is pointed to by SP.
164  // | argN       |  |
165  // | ...        |  |
166  // | arg4       |  |
167  // | arg3 spill |  |  Caller's frame
168  // | arg2 spill |  |
169  // | arg1 spill |  |
170  // | Method*    | ---
171  // | RA         |
172  // | ...        |    callee saves
173  // | A7         |    arg7
174  // | A6         |    arg6
175  // | A5         |    arg5
176  // | A4         |    arg4
177  // | A3         |    arg3
178  // | A2         |    arg2
179  // | A1         |    arg1
180  // | F19        |    f_arg7
181  // | F18        |    f_arg6
182  // | F17        |    f_arg5
183  // | F16        |    f_arg4
184  // | F15        |    f_arg3
185  // | F14        |    f_arg2
186  // | F13        |    f_arg1
187  // | F12        |    f_arg0
188  // |            |    padding
189  // | A0/Method* |  <- sp
  // NOTE: for MIPS64, when A0 (holding the Method*) is skipped, F12 is also skipped, since GPRs
  // and FPRs advance in lockstep.
191  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
192  static constexpr bool kAlignPairRegister = false;
193  static constexpr bool kQuickSoftFloatAbi = false;
194  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
195  static constexpr bool kQuickSkipOddFpRegisters = false;
196  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
197  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
198  static constexpr bool kGprFprLockstep = true;
199
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F13).
201  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
202  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
203  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
204    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
205  }
206#elif defined(__i386__)
207  // The callee save frame is pointed to by SP.
208  // | argN        |  |
209  // | ...         |  |
210  // | arg4        |  |
211  // | arg3 spill  |  |  Caller's frame
212  // | arg2 spill  |  |
213  // | arg1 spill  |  |
214  // | Method*     | ---
215  // | Return      |
216  // | EBP,ESI,EDI |    callee saves
217  // | EBX         |    arg3
218  // | EDX         |    arg2
219  // | ECX         |    arg1
220  // | XMM3        |    float arg 4
221  // | XMM2        |    float arg 3
222  // | XMM1        |    float arg 2
223  // | XMM0        |    float arg 1
224  // | EAX/Method* |  <- sp
225  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
226  static constexpr bool kAlignPairRegister = false;
227  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
228  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
229  static constexpr bool kQuickSkipOddFpRegisters = false;
230  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
231  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
232  static constexpr bool kGprFprLockstep = false;
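  // Offsets below (per the diagram above): the Method* slot is 4 bytes and is followed by 4 XMM
  // spill slots of 8 bytes each, hence 4 + 4*8 for the first GPR arg; the return address sits
  // above the 24 bytes of GPR/callee-save spills, hence 28 + 4*8.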
233  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
234  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
235  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
236  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
237    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
238  }
239#elif defined(__x86_64__)
240  // The callee save frame is pointed to by SP.
241  // | argN            |  |
242  // | ...             |  |
243  // | reg. arg spills |  |  Caller's frame
244  // | Method*         | ---
245  // | Return          |
246  // | R15             |    callee save
247  // | R14             |    callee save
248  // | R13             |    callee save
249  // | R12             |    callee save
250  // | R9              |    arg5
251  // | R8              |    arg4
252  // | RSI/R6          |    arg1
253  // | RBP/R5          |    callee save
254  // | RBX/R3          |    callee save
255  // | RDX/R2          |    arg2
256  // | RCX/R1          |    arg3
257  // | XMM7            |    float arg 8
258  // | XMM6            |    float arg 7
259  // | XMM5            |    float arg 6
260  // | XMM4            |    float arg 5
261  // | XMM3            |    float arg 4
262  // | XMM2            |    float arg 3
263  // | XMM1            |    float arg 2
264  // | XMM0            |    float arg 1
265  // | Padding         |
266  // | RDI/Method*     |  <- sp
267  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
268  static constexpr bool kAlignPairRegister = false;
269  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
270  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
271  static constexpr bool kQuickSkipOddFpRegisters = false;
272  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
273  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
274  static constexpr bool kGprFprLockstep = false;
275  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
276  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
277  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
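  // Reading aid for GprIndexToGprOffset below: argument GPRs are not spilled in argument order.
  // Counting 8-byte slots up from Gpr1Offset, the spill area holds RCX, RDX, RBX, RBP, RSI, R8
  // and R9 (see the diagram above), so visitor GPR index 0 (RSI/arg1) maps to slot 4, index 1
  // (RDX/arg2) to slot 1, index 2 (RCX/arg3) to slot 0, index 3 (R8/arg4) to slot 5 and index 4
  // (R9/arg5) to slot 6.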
278  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
279    switch (gpr_index) {
280      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
281      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
282      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
283      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
284      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
285      default:
286      LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
287      return 0;
288    }
289  }
290#else
291#error "Unsupported architecture"
292#endif
293
294 public:
295  // Special handling for proxy methods. Proxy methods are instance methods so the
296  // 'this' object is the 1st argument. They also have the same frame layout as the
297  // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
298  // 1st GPR.
299  static mirror::Object* GetProxyThisObject(ArtMethod** sp)
300      REQUIRES_SHARED(Locks::mutator_lock_) {
301    CHECK((*sp)->IsProxyMethod());
302    CHECK_GT(kNumQuickGprArgs, 0u);
303    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
304    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
305        GprIndexToGprOffset(kThisGprIndex);
306    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
307    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
308  }
309
310  static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
311    DCHECK((*sp)->IsCalleeSaveMethod());
312    return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
313  }
314
315  static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
316    DCHECK((*sp)->IsCalleeSaveMethod());
317    uint8_t* previous_sp =
318        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
319    return *reinterpret_cast<ArtMethod**>(previous_sp);
320  }
321
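  // Returns the dex pc of the call site in the caller. If the outer method's code is optimized
  // and the call site was inlined, this is the dex pc within the innermost inlined method.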
322  static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
323    DCHECK((*sp)->IsCalleeSaveMethod());
324    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
325    ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
326        reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
327    uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
328    const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
329    uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
330
331    if (current_code->IsOptimized()) {
332      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
333      CodeInfoEncoding encoding = code_info.ExtractEncoding();
334      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
335      DCHECK(stack_map.IsValid());
336      if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
337        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
338        return inline_info.GetDexPcAtDepth(encoding.inline_info_encoding,
339                                           inline_info.GetDepth(encoding.inline_info_encoding)-1);
340      } else {
341        return stack_map.GetDexPc(encoding.stack_map_encoding);
342      }
343    } else {
344      return current_code->ToDexPc(*caller_sp, outer_pc);
345    }
346  }
347
348  // For the given quick ref and args quick frame, return the caller's PC.
349  static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
350    DCHECK((*sp)->IsCalleeSaveMethod());
351    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
352    return *reinterpret_cast<uintptr_t*>(lr);
353  }
354
355  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
356                       uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
357          is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
358          gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
359          fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
360          stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
361              + sizeof(ArtMethod*)),  // Skip ArtMethod*.
362          gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
363          cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
364    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
365                  "Number of Quick FPR arguments unexpected");
366    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
367                  "Double alignment unexpected");
    // For double-register alignment, the back-fill counter (fpr_double_index_) must stay even,
    // which requires an even number of FPR argument registers.
370    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
371                  "Number of Quick FPR arguments not even");
372    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
373  }
374
375  virtual ~QuickArgumentVisitor() {}
376
377  virtual void Visit() = 0;
378
379  Primitive::Type GetParamPrimitiveType() const {
380    return cur_type_;
381  }
382
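  // Returns the address of the current argument's spill slot: the FPR spill area for FP args on
  // hard-float ABIs (doubles use fpr_double_index_ when back-filling is enabled), the GPR spill
  // area otherwise, and the caller's out-args area once the registers are exhausted.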
383  uint8_t* GetParamAddress() const {
384    if (!kQuickSoftFloatAbi) {
385      Primitive::Type type = GetParamPrimitiveType();
386      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
387        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
388          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
389            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
390          }
391        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
392          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
393        }
394        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
395      }
396    }
397    if (gpr_index_ < kNumQuickGprArgs) {
398      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
399    }
400    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
401  }
402
403  bool IsSplitLongOrDouble() const {
404    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
405        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
406      return is_split_long_or_double_;
407    } else {
408      return false;  // An optimization for when GPR and FPRs are 64bit.
409    }
410  }
411
412  bool IsParamAReference() const {
413    return GetParamPrimitiveType() == Primitive::kPrimNot;
414  }
415
416  bool IsParamALongOrDouble() const {
417    Primitive::Type type = GetParamPrimitiveType();
418    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
419  }
420
421  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
423    return *reinterpret_cast<uint64_t*>(stack_args_
424        + stack_index_ * kBytesStackArgLocation);
425  }
426
427  void IncGprIndex() {
428    gpr_index_++;
429    if (kGprFprLockstep) {
430      fpr_index_++;
431    }
432  }
433
434  void IncFprIndex() {
435    fpr_index_++;
436    if (kGprFprLockstep) {
437      gpr_index_++;
438    }
439  }
440
441  void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the method's first argument.
    // (b) whatever the argument type, 'stack_index_' should advance on every visit.
445    gpr_index_ = 0;
446    fpr_index_ = 0;
447    if (kQuickDoubleRegAlignedFloatBackFilled) {
448      fpr_double_index_ = 0;
449    }
450    stack_index_ = 0;
451    if (!is_static_) {  // Handle this.
452      cur_type_ = Primitive::kPrimNot;
453      is_split_long_or_double_ = false;
454      Visit();
455      stack_index_++;
456      if (kNumQuickGprArgs > 0) {
457        IncGprIndex();
458      }
459    }
460    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
461      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
462      switch (cur_type_) {
463        case Primitive::kPrimNot:
464        case Primitive::kPrimBoolean:
465        case Primitive::kPrimByte:
466        case Primitive::kPrimChar:
467        case Primitive::kPrimShort:
468        case Primitive::kPrimInt:
469          is_split_long_or_double_ = false;
470          Visit();
471          stack_index_++;
472          if (gpr_index_ < kNumQuickGprArgs) {
473            IncGprIndex();
474          }
475          break;
476        case Primitive::kPrimFloat:
477          is_split_long_or_double_ = false;
478          Visit();
479          stack_index_++;
480          if (kQuickSoftFloatAbi) {
481            if (gpr_index_ < kNumQuickGprArgs) {
482              IncGprIndex();
483            }
484          } else {
485            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
486              IncFprIndex();
487              if (kQuickDoubleRegAlignedFloatBackFilled) {
488                // Double should not overlap with float.
489                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
490                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
491                // Float should not overlap with double.
492                if (fpr_index_ % 2 == 0) {
493                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
494                }
495              } else if (kQuickSkipOddFpRegisters) {
496                IncFprIndex();
497              }
498            }
499          }
500          break;
501        case Primitive::kPrimDouble:
502        case Primitive::kPrimLong:
503          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
504            if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
505              // Currently, this is only for ARM and MIPS, where the first available parameter
506              // register is R1 (on ARM) or A1 (on MIPS). So we skip it, and use R2 (on ARM) or
507              // A2 (on MIPS) instead.
508              IncGprIndex();
509            }
510            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
511                ((gpr_index_ + 1) == kNumQuickGprArgs);
512            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
513              // We don't want to split this. Pass over this register.
514              gpr_index_++;
515              is_split_long_or_double_ = false;
516            }
517            Visit();
518            if (kBytesStackArgLocation == 4) {
519              stack_index_+= 2;
520            } else {
521              CHECK_EQ(kBytesStackArgLocation, 8U);
522              stack_index_++;
523            }
524            if (gpr_index_ < kNumQuickGprArgs) {
525              IncGprIndex();
526              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
527                if (gpr_index_ < kNumQuickGprArgs) {
528                  IncGprIndex();
529                }
530              }
531            }
532          } else {
533            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
534                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
535            Visit();
536            if (kBytesStackArgLocation == 4) {
537              stack_index_+= 2;
538            } else {
539              CHECK_EQ(kBytesStackArgLocation, 8U);
540              stack_index_++;
541            }
542            if (kQuickDoubleRegAlignedFloatBackFilled) {
543              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
544                fpr_double_index_ += 2;
545                // Float should not overlap with double.
546                if (fpr_index_ % 2 == 0) {
547                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
548                }
549              }
550            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
551              IncFprIndex();
552              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
553                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
554                  IncFprIndex();
555                }
556              }
557            }
558          }
559          break;
560        default:
561          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
562      }
563    }
564  }
565
566 protected:
567  const bool is_static_;
568  const char* const shorty_;
569  const uint32_t shorty_len_;
570
571 private:
572  uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
573  uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
574  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
575  uint32_t gpr_index_;  // Index into spilled GPRs.
576  // Index into spilled FPRs.
577  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
578  // holds a higher register number.
579  uint32_t fpr_index_;
580  // Index into spilled FPRs for aligned double.
581  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
582  // terms of singles, may be behind fpr_index.
583  uint32_t fpr_double_index_;
584  uint32_t stack_index_;  // Index into arguments on the stack.
585  // The current type of argument during VisitArguments.
586  Primitive::Type cur_type_;
587  // Does a 64bit parameter straddle the register and stack arguments?
588  bool is_split_long_or_double_;
589};
590
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows the QuickArgumentVisitor constants to be used without moving all of this code into its
// own module.
593extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
594    REQUIRES_SHARED(Locks::mutator_lock_) {
595  return QuickArgumentVisitor::GetProxyThisObject(sp);
596}
597
598// Visits arguments on the stack placing them into the shadow frame.
599class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
600 public:
601  BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
602                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
603      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
604
605  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
606
607 private:
608  ShadowFrame* const sf_;
609  uint32_t cur_reg_;
610
611  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
612};
613
614void BuildQuickShadowFrameVisitor::Visit() {
615  Primitive::Type type = GetParamPrimitiveType();
616  switch (type) {
617    case Primitive::kPrimLong:  // Fall-through.
618    case Primitive::kPrimDouble:
619      if (IsSplitLongOrDouble()) {
620        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
621      } else {
622        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
623      }
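      // Longs and doubles occupy two vregs: bump cur_reg_ once here and once more after the
      // switch.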
624      ++cur_reg_;
625      break;
626    case Primitive::kPrimNot: {
627        StackReference<mirror::Object>* stack_ref =
628            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
629        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
630      }
631      break;
632    case Primitive::kPrimBoolean:  // Fall-through.
633    case Primitive::kPrimByte:     // Fall-through.
634    case Primitive::kPrimChar:     // Fall-through.
635    case Primitive::kPrimShort:    // Fall-through.
636    case Primitive::kPrimInt:      // Fall-through.
637    case Primitive::kPrimFloat:
638      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
639      break;
640    case Primitive::kPrimVoid:
641      LOG(FATAL) << "UNREACHABLE";
642      UNREACHABLE();
643  }
644  ++cur_reg_;
645}
646
647extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
648    REQUIRES_SHARED(Locks::mutator_lock_) {
649  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
650  // frame.
651  ScopedQuickEntrypointChecks sqec(self);
652
653  if (UNLIKELY(!method->IsInvokable())) {
654    method->ThrowInvocationTimeError();
655    return 0;
656  }
657
658  JValue tmp_value;
659  ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
660      StackedShadowFrameType::kDeoptimizationShadowFrame, false);
661  ManagedStack fragment;
662
663  DCHECK(!method->IsNative()) << PrettyMethod(method);
664  uint32_t shorty_len = 0;
665  ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
666  const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();
667  DCHECK(code_item != nullptr) << PrettyMethod(method);
668  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
669
670  JValue result;
671
672  if (deopt_frame != nullptr) {
673    // Coming from partial-fragment deopt.
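    // A non-null deopt_frame means a previous partial-fragment deoptimization already built the
    // shadow frames for this invocation, so re-enter the interpreter with them instead of
    // building a fresh shadow frame from the quick arguments below.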
674
675    if (kIsDebugBuild) {
676      // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom
677      // of the call-stack) corresponds to the called method.
678      ShadowFrame* linked = deopt_frame;
679      while (linked->GetLink() != nullptr) {
680        linked = linked->GetLink();
681      }
682      CHECK_EQ(method, linked->GetMethod()) << PrettyMethod(method) << " "
683          << PrettyMethod(linked->GetMethod());
684    }
685
686    if (VLOG_IS_ON(deopt)) {
687      // Print out the stack to verify that it was a partial-fragment deopt.
688      LOG(INFO) << "Continue-ing from deopt. Stack is:";
689      QuickExceptionHandler::DumpFramesWithType(self, true);
690    }
691
692    mirror::Throwable* pending_exception = nullptr;
693    bool from_code = false;
694    self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);
695
696    // Push a transition back into managed code onto the linked list in thread.
697    self->PushManagedStackFragment(&fragment);
698
699    // Ensure that the stack is still in order.
700    if (kIsDebugBuild) {
701      class DummyStackVisitor : public StackVisitor {
702       public:
703        explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
704            : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
705
706        bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
707          // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
708          // logic. Just always say we want to continue.
709          return true;
710        }
711      };
712      DummyStackVisitor dsv(self);
713      dsv.WalkStack();
714    }
715
716    // Restore the exception that was pending before deoptimization then interpret the
717    // deoptimized frames.
718    if (pending_exception != nullptr) {
719      self->SetException(pending_exception);
720    }
721    interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result);
722  } else {
723    const char* old_cause = self->StartAssertNoThreadSuspension(
724        "Building interpreter shadow frame");
725    uint16_t num_regs = code_item->registers_size_;
726    // No last shadow coming from quick.
727    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
728        CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
729    ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
730    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
731    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
732                                                      shadow_frame, first_arg_reg);
733    shadow_frame_builder.VisitArguments();
734    const bool needs_initialization =
735        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
736    // Push a transition back into managed code onto the linked list in thread.
737    self->PushManagedStackFragment(&fragment);
738    self->PushShadowFrame(shadow_frame);
739    self->EndAssertNoThreadSuspension(old_cause);
740
741    if (needs_initialization) {
742      // Ensure static method's class is initialized.
743      StackHandleScope<1> hs(self);
744      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
745      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
746        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
747        self->PopManagedStackFragment(fragment);
748        return 0;
749      }
750    }
751
752    result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
753  }
754
755  // Pop transition.
756  self->PopManagedStackFragment(fragment);
757
758  // Request a stack deoptimization if needed
759  ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
760  uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
761  // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
762  // should be done and it knows the real return pc.
763  if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
764               Dbg::IsForcedInterpreterNeededForUpcall(self, caller) &&
765               Runtime::Current()->IsDeoptimizeable(caller_pc))) {
766    // Push the context of the deoptimization stack so we can restore the return value and the
767    // exception before executing the deoptimized frames.
768    self->PushDeoptimizationContext(
769        result, shorty[0] == 'L', /* from_code */ false, self->GetException());
770
771    // Set special exception to cause deoptimization.
772    self->SetException(Thread::GetDeoptimizationException());
773  }
774
775  // No need to restore the args since the method has already been run by the interpreter.
776  return result.GetJ();
777}
778
// Visits arguments on the stack, placing them into the args vector; Object* arguments are
// converted to jobjects.
781class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
782 public:
783  BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
784                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
785      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
786
787  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
788
789  void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
790
791 private:
792  ScopedObjectAccessUnchecked* const soa_;
793  std::vector<jvalue>* const args_;
794  // References which we must update when exiting in case the GC moved the objects.
795  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
796
797  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
798};
799
800void BuildQuickArgumentVisitor::Visit() {
801  jvalue val;
802  Primitive::Type type = GetParamPrimitiveType();
803  switch (type) {
804    case Primitive::kPrimNot: {
805      StackReference<mirror::Object>* stack_ref =
806          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
807      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
808      references_.push_back(std::make_pair(val.l, stack_ref));
809      break;
810    }
811    case Primitive::kPrimLong:  // Fall-through.
812    case Primitive::kPrimDouble:
813      if (IsSplitLongOrDouble()) {
814        val.j = ReadSplitLongParam();
815      } else {
816        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
817      }
818      break;
819    case Primitive::kPrimBoolean:  // Fall-through.
820    case Primitive::kPrimByte:     // Fall-through.
821    case Primitive::kPrimChar:     // Fall-through.
822    case Primitive::kPrimShort:    // Fall-through.
823    case Primitive::kPrimInt:      // Fall-through.
824    case Primitive::kPrimFloat:
825      val.i = *reinterpret_cast<jint*>(GetParamAddress());
826      break;
827    case Primitive::kPrimVoid:
828      LOG(FATAL) << "UNREACHABLE";
829      UNREACHABLE();
830  }
831  args_->push_back(val);
832}
833
834void BuildQuickArgumentVisitor::FixupReferences() {
835  // Fixup any references which may have changed.
836  for (const auto& pair : references_) {
837    pair.second->Assign(soa_->Decode<mirror::Object>(pair.first).Ptr());
838    soa_->Env()->DeleteLocalRef(pair.first);
839  }
840}
841
// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method,
// which is responsible for recording callee save registers. We explicitly place the incoming
// reference arguments into jobjects (so they survive GC), then invoke the invocation handler, a
// field within the proxy object, which boxes the primitive arguments and deals with error cases.
846extern "C" uint64_t artQuickProxyInvokeHandler(
847    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
848    REQUIRES_SHARED(Locks::mutator_lock_) {
849  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
850  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
851  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
852  const char* old_cause =
853      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
854  // Register the top of the managed stack, making stack crawlable.
855  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
856  self->VerifyStack();
857  // Start new JNI local reference state.
858  JNIEnvExt* env = self->GetJniEnv();
859  ScopedObjectAccessUnchecked soa(env);
860  ScopedJniEnvLocalRefState env_state(env);
  // Create a local ref. copy of the receiver (the interface method is wrapped below).
862  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
863
  // Place the arguments into the args vector and remove the receiver.
865  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
866  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
867                                       << PrettyMethod(non_proxy_method);
868  std::vector<jvalue> args;
869  uint32_t shorty_len = 0;
870  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
871  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
872
873  local_ref_visitor.VisitArguments();
874  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
875  args.erase(args.begin());
876
877  // Convert proxy method into expected interface method.
878  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
879  DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
880  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
881  self->EndAssertNoThreadSuspension(old_cause);
882  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
883  DCHECK(!Runtime::Current()->IsActiveTransaction());
884  jobject interface_method_jobj = soa.AddLocalReference<jobject>(
885      mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(),
886                                                                      interface_method));
887
  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
890  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
891  // Restore references which might have moved.
892  local_ref_visitor.FixupReferences();
893  return result.GetJ();
894}
895
// Reads object references held in arguments from quick frames and places them in JNI local
// references so they don't get garbage collected.
898class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
899 public:
900  RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
901                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
902      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
903
904  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
905
906  void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
907
908 private:
909  ScopedObjectAccessUnchecked* const soa_;
910  // References which we must update when exiting in case the GC moved the objects.
911  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
912
913  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
914};
915
916void RememberForGcArgumentVisitor::Visit() {
917  if (IsParamAReference()) {
918    StackReference<mirror::Object>* stack_ref =
919        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
920    jobject reference =
921        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
922    references_.push_back(std::make_pair(reference, stack_ref));
923  }
924}
925
926void RememberForGcArgumentVisitor::FixupReferences() {
927  // Fixup any references which may have changed.
928  for (const auto& pair : references_) {
929    pair.second->Assign(soa_->Decode<mirror::Object>(pair.first).Ptr());
930    soa_->Env()->DeleteLocalRef(pair.first);
931  }
932}
933
934// Lazily resolve a method for quick. Called by stub code.
935extern "C" const void* artQuickResolutionTrampoline(
936    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
937    REQUIRES_SHARED(Locks::mutator_lock_) {
938  // The resolution trampoline stashes the resolved method into the callee-save frame to transport
939  // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
940  // does not have the same stack layout as the callee-save method).
941  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
942  // Start new JNI local reference state
943  JNIEnvExt* env = self->GetJniEnv();
944  ScopedObjectAccessUnchecked soa(env);
945  ScopedJniEnvLocalRefState env_state(env);
946  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
947
948  // Compute details about the called method (avoid GCs)
949  ClassLinker* linker = Runtime::Current()->GetClassLinker();
950  InvokeType invoke_type;
951  MethodReference called_method(nullptr, 0);
952  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
953  ArtMethod* caller = nullptr;
954  if (!called_method_known_on_entry) {
955    caller = QuickArgumentVisitor::GetCallingMethod(sp);
956    uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
957    const DexFile::CodeItem* code;
958    called_method.dex_file = caller->GetDexFile();
959    code = caller->GetCodeItem();
960    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
961    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
962    Instruction::Code instr_code = instr->Opcode();
963    bool is_range;
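    // Recover the invoke type and the /range flavor from the opcode at the call site; the method
    // index operand is then read from vB of the 35c or 3rc instruction format below.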
964    switch (instr_code) {
965      case Instruction::INVOKE_DIRECT:
966        invoke_type = kDirect;
967        is_range = false;
968        break;
969      case Instruction::INVOKE_DIRECT_RANGE:
970        invoke_type = kDirect;
971        is_range = true;
972        break;
973      case Instruction::INVOKE_STATIC:
974        invoke_type = kStatic;
975        is_range = false;
976        break;
977      case Instruction::INVOKE_STATIC_RANGE:
978        invoke_type = kStatic;
979        is_range = true;
980        break;
981      case Instruction::INVOKE_SUPER:
982        invoke_type = kSuper;
983        is_range = false;
984        break;
985      case Instruction::INVOKE_SUPER_RANGE:
986        invoke_type = kSuper;
987        is_range = true;
988        break;
989      case Instruction::INVOKE_VIRTUAL:
990        invoke_type = kVirtual;
991        is_range = false;
992        break;
993      case Instruction::INVOKE_VIRTUAL_RANGE:
994        invoke_type = kVirtual;
995        is_range = true;
996        break;
997      case Instruction::INVOKE_INTERFACE:
998        invoke_type = kInterface;
999        is_range = false;
1000        break;
1001      case Instruction::INVOKE_INTERFACE_RANGE:
1002        invoke_type = kInterface;
1003        is_range = true;
1004        break;
1005      default:
1006        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
1007        UNREACHABLE();
1008    }
1009    called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
1010  } else {
1011    invoke_type = kStatic;
1012    called_method.dex_file = called->GetDexFile();
1013    called_method.dex_method_index = called->GetDexMethodIndex();
1014  }
1015  uint32_t shorty_len;
1016  const char* shorty =
1017      called_method.dex_file->GetMethodShorty(
1018          called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
1019  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
1020  visitor.VisitArguments();
1021  self->EndAssertNoThreadSuspension(old_cause);
1022  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
1023  // Resolve method filling in dex cache.
1024  if (!called_method_known_on_entry) {
1025    StackHandleScope<1> hs(self);
1026    mirror::Object* dummy = nullptr;
1027    HandleWrapper<mirror::Object> h_receiver(
1028        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
1029    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1030    called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
1031        self, called_method.dex_method_index, caller, invoke_type);
1032  }
1033  const void* code = nullptr;
1034  if (LIKELY(!self->IsExceptionPending())) {
1035    // Incompatible class change should have been handled in resolve method.
1036    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
1037        << PrettyMethod(called) << " " << invoke_type;
1038    if (virtual_or_interface || invoke_type == kSuper) {
1039      // Refine called method based on receiver for kVirtual/kInterface, and
1040      // caller for kSuper.
1041      ArtMethod* orig_called = called;
1042      if (invoke_type == kVirtual) {
1043        CHECK(receiver != nullptr) << invoke_type;
1044        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
1045      } else if (invoke_type == kInterface) {
1046        CHECK(receiver != nullptr) << invoke_type;
1047        called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
1048      } else {
1049        DCHECK_EQ(invoke_type, kSuper);
1050        CHECK(caller != nullptr) << invoke_type;
1051        StackHandleScope<2> hs(self);
1052        Handle<mirror::DexCache> dex_cache(
1053            hs.NewHandle(caller->GetDeclaringClass()->GetDexCache()));
1054        Handle<mirror::ClassLoader> class_loader(
1055            hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader()));
1056        // TODO Maybe put this into a mirror::Class function.
1057        mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
1058            called_method.dex_method_index, dex_cache, class_loader);
1059        if (ref_class->IsInterface()) {
1060          called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
1061        } else {
1062          called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
1063              called->GetMethodIndex(), kRuntimePointerSize);
1064        }
1065      }
1066
1067      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
1068                               << PrettyTypeOf(receiver) << " "
1069                               << invoke_type << " " << orig_called->GetVtableIndex();
1070
1071      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
1072      // of the sharpened method avoiding dirtying the dex cache if possible.
1073      // Note, called_method.dex_method_index references the dex method before the
1074      // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares
1075      // about the name and signature.
1076      uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
1077      if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) {
1078        // Calling from one dex file to another, need to compute the method index appropriate to
1079        // the caller's dex file. Since we get here only if the original called was a runtime
1080        // method, we've got the correct dex_file and a dex_method_idx from above.
1081        DCHECK(!called_method_known_on_entry);
1082        DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
1083        const DexFile* caller_dex_file = called_method.dex_file;
1084        uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
1085        update_dex_cache_method_index =
1086            called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
1087                                                     caller_method_name_and_sig_index);
1088      }
1089      if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
1090          (caller->GetDexCacheResolvedMethod(
1091              update_dex_cache_method_index, kRuntimePointerSize) != called)) {
1092        caller->SetDexCacheResolvedMethod(update_dex_cache_method_index,
1093                                          called,
1094                                          kRuntimePointerSize);
1095      }
1096    } else if (invoke_type == kStatic) {
1097      const auto called_dex_method_idx = called->GetDexMethodIndex();
1098      // For static invokes, we may dispatch to the static method in the superclass but resolve
1099      // using the subclass. To prevent getting slow paths on each invoke, we force set the
1100      // resolved method for the super class dex method index if we are in the same dex file.
1101      // b/19175856
1102      if (called->GetDexFile() == called_method.dex_file &&
1103          called_method.dex_method_index != called_dex_method_idx) {
1104        called->GetDexCache()->SetResolvedMethod(called_dex_method_idx,
1105                                                 called,
1106                                                 kRuntimePointerSize);
1107      }
1108    }
1109
1110    // Ensure that the called method's class is initialized.
1111    StackHandleScope<1> hs(soa.Self());
1112    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
1113    linker->EnsureInitialized(soa.Self(), called_class, true, true);
1114    if (LIKELY(called_class->IsInitialized())) {
1115      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
1116        // If we are single-stepping or the called method is deoptimized (by a
1117        // breakpoint, for example), then we have to execute the called method
1118        // with the interpreter.
1119        code = GetQuickToInterpreterBridge();
1120      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
1121        // If the caller is deoptimized (by a breakpoint, for example), we have to
1122        // continue its execution with interpreter when returning from the called
1123        // method. Because we do not want to execute the called method with the
1124        // interpreter, we wrap its execution into the instrumentation stubs.
1125        // When the called method returns, it will execute the instrumentation
1126        // exit hook that will determine the need of the interpreter with a call
1127        // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
1128        // it is needed.
1129        code = GetQuickInstrumentationEntryPoint();
1130      } else {
1131        code = called->GetEntryPointFromQuickCompiledCode();
1132      }
1133    } else if (called_class->IsInitializing()) {
1134      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
1135        // If we are single-stepping or the called method is deoptimized (by a
1136        // breakpoint, for example), then we have to execute the called method
1137        // with the interpreter.
1138        code = GetQuickToInterpreterBridge();
1139      } else if (invoke_type == kStatic) {
1140        // Class is still initializing, go to oat and grab code (trampoline must be left in place
1141        // until class is initialized to stop races between threads).
1142        code = linker->GetQuickOatCodeFor(called);
1143      } else {
1144        // No trampoline for non-static methods.
1145        code = called->GetEntryPointFromQuickCompiledCode();
1146      }
1147    } else {
1148      DCHECK(called_class->IsErroneous());
1149    }
1150  }
1151  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
1153  visitor.FixupReferences();
1154  // Place called method in callee-save frame to be placed as first argument to quick method.
1155  *sp = called;
1156
1157  return code;
1158}
1159
1160/*
1161 * This class uses a couple of observations to unite the different calling conventions through
1162 * a few constants.
1163 *
1164 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
1165 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, ARM, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code is
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into the LSBs and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on ARM and 32-bit MIPS, and it's the same in
 *    registers and on the stack.
1177 * 6) There is only little endian.
1178 *
1179 *
1180 * Actual work is supposed to be done in a delegate of the template type. The interface is as
1181 * follows:
1182 *
1183 * void PushGpr(uintptr_t):   Add a value for the next GPR
1184 *
1185 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Is only called if we need
1186 *                            padding, that is, think the architecture is 32b and aligns 64b.
1187 *
1188 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job to
1189 *                            split this if necessary. The current state will have aligned, if
1190 *                            necessary.
1191 *
1192 * void PushStack(uintptr_t): Push a value to the stack.
1193 *
1194 * uintptr_t PushHandleScope(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
1195 *                                          as this might be important for null initialization.
1196 *                                          Must return the jobject, that is, the reference to the
1197 *                                          entry in the HandleScope (nullptr if necessary).
1198 *
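 * For illustration only (a minimal sketch, not part of this file), a delegate that merely counts
 * how many values land in each location could look like:
 *
 *   struct CountingDelegate {
 *     size_t gprs = 0, fprs = 0, stack = 0, handles = 0;
 *     void PushGpr(uintptr_t) { ++gprs; }
 *     void PushFpr4(float) { ++fprs; }
 *     void PushFpr8(uint64_t) { ++fprs; }
 *     void PushStack(uintptr_t) { ++stack; }
 *     uintptr_t PushHandle(mirror::Object*) { ++handles; return 0u; }
 *   };
 *   // CountingDelegate d; BuildNativeCallFrameStateMachine<CountingDelegate> sm(&d);
 *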
1199 */
1200template<class T> class BuildNativeCallFrameStateMachine {
1201 public:
1202#if defined(__arm__)
1203  // TODO: These are all dummy values!
1204  static constexpr bool kNativeSoftFloatAbi = true;
1205  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
1206  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
1207
1208  static constexpr size_t kRegistersNeededForLong = 2;
1209  static constexpr size_t kRegistersNeededForDouble = 2;
1210  static constexpr bool kMultiRegistersAligned = true;
1211  static constexpr bool kMultiFPRegistersWidened = false;
1212  static constexpr bool kMultiGPRegistersWidened = false;
1213  static constexpr bool kAlignLongOnStack = true;
1214  static constexpr bool kAlignDoubleOnStack = true;
1215#elif defined(__aarch64__)
1216  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
1217  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs, x0-x7.
1218  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
1219
1220  static constexpr size_t kRegistersNeededForLong = 1;
1221  static constexpr size_t kRegistersNeededForDouble = 1;
1222  static constexpr bool kMultiRegistersAligned = false;
1223  static constexpr bool kMultiFPRegistersWidened = false;
1224  static constexpr bool kMultiGPRegistersWidened = false;
1225  static constexpr bool kAlignLongOnStack = false;
1226  static constexpr bool kAlignDoubleOnStack = false;
1227#elif defined(__mips__) && !defined(__LP64__)
1228  static constexpr bool kNativeSoftFloatAbi = true;  // Hard-float ABI, but args are set up as for soft float (FP args go in GPRs).
1229  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
1230  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
1231
1232  static constexpr size_t kRegistersNeededForLong = 2;
1233  static constexpr size_t kRegistersNeededForDouble = 2;
1234  static constexpr bool kMultiRegistersAligned = true;
1235  static constexpr bool kMultiFPRegistersWidened = true;
1236  static constexpr bool kMultiGPRegistersWidened = false;
1237  static constexpr bool kAlignLongOnStack = true;
1238  static constexpr bool kAlignDoubleOnStack = true;
1239#elif defined(__mips__) && defined(__LP64__)
1240  // Let the code prepare GPRs only; we will load the FPRs with the same data.
1241  static constexpr bool kNativeSoftFloatAbi = true;
1242  static constexpr size_t kNumNativeGprArgs = 8;
1243  static constexpr size_t kNumNativeFprArgs = 0;
1244
1245  static constexpr size_t kRegistersNeededForLong = 1;
1246  static constexpr size_t kRegistersNeededForDouble = 1;
1247  static constexpr bool kMultiRegistersAligned = false;
1248  static constexpr bool kMultiFPRegistersWidened = false;
1249  static constexpr bool kMultiGPRegistersWidened = true;
1250  static constexpr bool kAlignLongOnStack = false;
1251  static constexpr bool kAlignDoubleOnStack = false;
1252#elif defined(__i386__)
1253  // TODO: Check these!
1254  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
1255  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs; all arguments go on the stack.
1256  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
1257
1258  static constexpr size_t kRegistersNeededForLong = 2;
1259  static constexpr size_t kRegistersNeededForDouble = 2;
1260  static constexpr bool kMultiRegistersAligned = false;  // x86 is not using regs for args anyway.
1261  static constexpr bool kMultiFPRegistersWidened = false;
1262  static constexpr bool kMultiGPRegistersWidened = false;
1263  static constexpr bool kAlignLongOnStack = false;
1264  static constexpr bool kAlignDoubleOnStack = false;
1265#elif defined(__x86_64__)
1266  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
1267  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
1268  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
1269
1270  static constexpr size_t kRegistersNeededForLong = 1;
1271  static constexpr size_t kRegistersNeededForDouble = 1;
1272  static constexpr bool kMultiRegistersAligned = false;
1273  static constexpr bool kMultiFPRegistersWidened = false;
1274  static constexpr bool kMultiGPRegistersWidened = false;
1275  static constexpr bool kAlignLongOnStack = false;
1276  static constexpr bool kAlignDoubleOnStack = false;
1277#else
1278#error "Unsupported architecture"
1279#endif
1280
1281 public:
1282  explicit BuildNativeCallFrameStateMachine(T* delegate)
1283      : gpr_index_(kNumNativeGprArgs),
1284        fpr_index_(kNumNativeFprArgs),
1285        stack_entries_(0),
1286        delegate_(delegate) {
1287    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
1288    // the next register is even; since we count down, this requires the totals to be even (asserted below).
1289    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
1290    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
1291  }
1292
1293  virtual ~BuildNativeCallFrameStateMachine() {}
1294
1295  bool HavePointerGpr() const {
1296    return gpr_index_ > 0;
1297  }
1298
1299  void AdvancePointer(const void* val) {
1300    if (HavePointerGpr()) {
1301      gpr_index_--;
1302      PushGpr(reinterpret_cast<uintptr_t>(val));
1303    } else {
1304      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
1305      PushStack(reinterpret_cast<uintptr_t>(val));
1306      gpr_index_ = 0;
1307    }
1308  }
1309
1310  bool HaveHandleScopeGpr() const {
1311    return gpr_index_ > 0;
1312  }
1313
1314  void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
1315    uintptr_t handle = PushHandle(ptr);
1316    if (HaveHandleScopeGpr()) {
1317      gpr_index_--;
1318      PushGpr(handle);
1319    } else {
1320      stack_entries_++;
1321      PushStack(handle);
1322      gpr_index_ = 0;
1323    }
1324  }
1325
1326  bool HaveIntGpr() const {
1327    return gpr_index_ > 0;
1328  }
1329
1330  void AdvanceInt(uint32_t val) {
1331    if (HaveIntGpr()) {
1332      gpr_index_--;
1333      if (kMultiGPRegistersWidened) {
1334        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1335        PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1336      } else {
1337        PushGpr(val);
1338      }
1339    } else {
1340      stack_entries_++;
1341      if (kMultiGPRegistersWidened) {
1342        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
1343        PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
1344      } else {
1345        PushStack(val);
1346      }
1347      gpr_index_ = 0;
1348    }
1349  }
1350
1351  bool HaveLongGpr() const {
1352    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1353  }
1354
1355  bool LongGprNeedsPadding() const {
1356    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
1357        kAlignLongOnStack &&                  // and when it needs alignment
1358        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
1359  }
1360
1361  bool LongStackNeedsPadding() const {
1362    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
1363        kAlignLongOnStack &&                  // and when it needs 8B alignment
1364        (stack_entries_ & 1) == 1;            // counter is odd
1365  }
1366
1367  void AdvanceLong(uint64_t val) {
1368    if (HaveLongGpr()) {
1369      if (LongGprNeedsPadding()) {
1370        PushGpr(0);
1371        gpr_index_--;
1372      }
1373      if (kRegistersNeededForLong == 1) {
1374        PushGpr(static_cast<uintptr_t>(val));
1375      } else {
1376        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1377        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1378      }
1379      gpr_index_ -= kRegistersNeededForLong;
1380    } else {
1381      if (LongStackNeedsPadding()) {
1382        PushStack(0);
1383        stack_entries_++;
1384      }
1385      if (kRegistersNeededForLong == 1) {
1386        PushStack(static_cast<uintptr_t>(val));
1387        stack_entries_++;
1388      } else {
1389        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1390        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1391        stack_entries_ += 2;
1392      }
1393      gpr_index_ = 0;
1394    }
1395  }
1396
1397  bool HaveFloatFpr() const {
1398    return fpr_index_ > 0;
1399  }
1400
1401  void AdvanceFloat(float val) {
1402    if (kNativeSoftFloatAbi) {
1403      AdvanceInt(bit_cast<uint32_t, float>(val));
1404    } else {
1405      if (HaveFloatFpr()) {
1406        fpr_index_--;
1407        if (kRegistersNeededForDouble == 1) {
1408          if (kMultiFPRegistersWidened) {
1409            PushFpr8(bit_cast<uint64_t, double>(val));
1410          } else {
1411            // No widening, just use the bits.
1412            PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
1413          }
1414        } else {
1415          PushFpr4(val);
1416        }
1417      } else {
1418        stack_entries_++;
1419        if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
1420          // Need to widen before storing: Note the "double" in the template instantiation.
1421          // Note: We need to jump through those hoops to make the compiler happy.
1422          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
1423          PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
1424        } else {
1425          PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
1426        }
1427        fpr_index_ = 0;
1428      }
1429    }
1430  }
1431
1432  bool HaveDoubleFpr() const {
1433    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1434  }
1435
1436  bool DoubleFprNeedsPadding() const {
1437    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
1438        kAlignDoubleOnStack &&                  // and when it needs alignment
1439        (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
1440  }
1441
1442  bool DoubleStackNeedsPadding() const {
1443    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
1444        kAlignDoubleOnStack &&                  // and when it needs 8B alignment
1445        (stack_entries_ & 1) == 1;              // counter is odd
1446  }
1447
1448  void AdvanceDouble(uint64_t val) {
1449    if (kNativeSoftFloatAbi) {
1450      AdvanceLong(val);
1451    } else {
1452      if (HaveDoubleFpr()) {
1453        if (DoubleFprNeedsPadding()) {
1454          PushFpr4(0);
1455          fpr_index_--;
1456        }
1457        PushFpr8(val);
1458        fpr_index_ -= kRegistersNeededForDouble;
1459      } else {
1460        if (DoubleStackNeedsPadding()) {
1461          PushStack(0);
1462          stack_entries_++;
1463        }
1464        if (kRegistersNeededForDouble == 1) {
1465          PushStack(static_cast<uintptr_t>(val));
1466          stack_entries_++;
1467        } else {
1468          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1469          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1470          stack_entries_ += 2;
1471        }
1472        fpr_index_ = 0;
1473      }
1474    }
1475  }
1476
1477  uint32_t GetStackEntries() const {
1478    return stack_entries_;
1479  }
1480
1481  uint32_t GetNumberOfUsedGprs() const {
1482    return kNumNativeGprArgs - gpr_index_;
1483  }
1484
1485  uint32_t GetNumberOfUsedFprs() const {
1486    return kNumNativeFprArgs - fpr_index_;
1487  }
1488
1489 private:
1490  void PushGpr(uintptr_t val) {
1491    delegate_->PushGpr(val);
1492  }
1493  void PushFpr4(float val) {
1494    delegate_->PushFpr4(val);
1495  }
1496  void PushFpr8(uint64_t val) {
1497    delegate_->PushFpr8(val);
1498  }
1499  void PushStack(uintptr_t val) {
1500    delegate_->PushStack(val);
1501  }
1502  uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
1503    return delegate_->PushHandle(ref);
1504  }
1505
1506  uint32_t gpr_index_;      // Number of free GPRs
1507  uint32_t fpr_index_;      // Number of free FPRs
1508  uint32_t stack_entries_;  // Number of stack slots used; each slot is one uintptr_t. Floats
1509                            // still occupy a full slot even though they are not extended.
1510  T* const delegate_;             // What Push implementation gets called
1511};
1512
1513// Computes the sizes of register stacks and call stack area. Handling of references can be extended
1514// in subclasses.
1515//
1516// To handle native pointers, use "L" in the shorty for an object reference, which simulates
1517// them with handles.
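//
// Illustrative usage only (a sketch; real callers hold the mutator lock and pass the method's
// actual shorty):
//   ComputeNativeCallFrameSize fsc;
//   fsc.Walk("VIJ", 3u);                           // void f(int, long)
//   uint32_t out_args_size = fsc.GetStackSize();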
1518class ComputeNativeCallFrameSize {
1519 public:
1520  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1521
1522  virtual ~ComputeNativeCallFrameSize() {}
1523
1524  uint32_t GetStackSize() const {
1525    return num_stack_entries_ * sizeof(uintptr_t);
1526  }
1527
1528  uint8_t* LayoutCallStack(uint8_t* sp8) const {
1529    sp8 -= GetStackSize();
1530    // Align by kStackAlignment.
1531    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1532    return sp8;
1533  }
1534
1535  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
1536      const {
1537    // Assumption (an FPR argument fits in sizeof(uintptr_t) bytes) is OK right now, as we have soft-float arm.
1538    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1539    sp8 -= fregs * sizeof(uintptr_t);
1540    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
1541    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1542    sp8 -= iregs * sizeof(uintptr_t);
1543    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
1544    return sp8;
1545  }
1546
1547  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
1548                            uint32_t** start_fpr) const {
1549    // Native call stack.
1550    sp8 = LayoutCallStack(sp8);
1551    *start_stack = reinterpret_cast<uintptr_t*>(sp8);
1552
1553    // Put fprs and gprs below.
1554    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
1555
1556    // Return the new bottom.
1557    return sp8;
1558  }
1559
1560  virtual void WalkHeader(
1561      BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
1562      REQUIRES_SHARED(Locks::mutator_lock_) {
1563  }
1564
1565  void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
1566    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1567
1568    WalkHeader(&sm);
1569
1570    for (uint32_t i = 1; i < shorty_len; ++i) {
1571      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
1572      switch (cur_type_) {
1573        case Primitive::kPrimNot:
1574          // TODO: fix abuse of mirror types.
1575          sm.AdvanceHandleScope(
1576              reinterpret_cast<mirror::Object*>(0x12345678));
1577          break;
1578
1579        case Primitive::kPrimBoolean:
1580        case Primitive::kPrimByte:
1581        case Primitive::kPrimChar:
1582        case Primitive::kPrimShort:
1583        case Primitive::kPrimInt:
1584          sm.AdvanceInt(0);
1585          break;
1586        case Primitive::kPrimFloat:
1587          sm.AdvanceFloat(0);
1588          break;
1589        case Primitive::kPrimDouble:
1590          sm.AdvanceDouble(0);
1591          break;
1592        case Primitive::kPrimLong:
1593          sm.AdvanceLong(0);
1594          break;
1595        default:
1596          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1597          UNREACHABLE();
1598      }
1599    }
1600
1601    num_stack_entries_ = sm.GetStackEntries();
1602  }
1603
1604  void PushGpr(uintptr_t /* val */) {
1605    // not optimizing registers, yet
1606  }
1607
1608  void PushFpr4(float /* val */) {
1609    // not optimizing registers, yet
1610  }
1611
1612  void PushFpr8(uint64_t /* val */) {
1613    // not optimizing registers, yet
1614  }
1615
1616  void PushStack(uintptr_t /* val */) {
1617    // counting is already done in the superclass
1618  }
1619
1620  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
1621    return reinterpret_cast<uintptr_t>(nullptr);
1622  }
1623
1624 protected:
1625  uint32_t num_stack_entries_;
1626};
1627
1628class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
1629 public:
1630  explicit ComputeGenericJniFrameSize(bool critical_native)
1631    : num_handle_scope_references_(0), critical_native_(critical_native) {}
1632
1633  // Lays out the callee-save frame. Assumes that the incoming, not yet fixed-up frame corresponding
1634  // to RefsAndArgs is at *m = sp. Will update *m to point to the bottom of the fixed-up save frame.
1635  //
1636  // Note: assumes ComputeAll() has been run before.
1637  void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
1638      REQUIRES_SHARED(Locks::mutator_lock_) {
1639    ArtMethod* method = **m;
1640
1641    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
1642
1643    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
1644
1645    // First, fix up the layout of the callee-save frame.
1646    // We have to squeeze in the HandleScope, and relocate the method pointer.
1647
1648    // "Free" the slot for the method.
1649    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
1650
1651    // Under the callee saves put handle scope and new method stack reference.
1652    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
1653    size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);
1654
1655    sp8 -= scope_and_method;
1656    // Align by kStackAlignment.
1657    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1658
1659    uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
1660    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
1661                                        num_handle_scope_references_);
1662
1663    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
1664    uint8_t* method_pointer = sp8;
1665    auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
1666    *new_method_ref = method;
1667    *m = new_method_ref;
1668  }
1669
1670  // Adds space for the cookie. Note: may leave stack unaligned.
1671  void LayoutCookie(uint8_t** sp) const {
1672    // Reference cookie and padding
1673    *sp -= 8;
1674  }
1675
1676  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
1677  // Returns the new bottom. Note: this may be unaligned.
1678  uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
1679      REQUIRES_SHARED(Locks::mutator_lock_) {
1680    // First, fix up the layout of the callee-save frame.
1681    // We have to squeeze in the HandleScope, and relocate the method pointer.
1682    LayoutCalleeSaveFrame(self, m, sp, handle_scope);
1683
1684    // The bottom of the callee-save frame is now where the method is, *m.
1685    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
1686
1687    // Add space for cookie.
1688    LayoutCookie(&sp8);
1689
1690    return sp8;
1691  }
1692
1693  // WARNING: After this, *sp won't be pointing to the method anymore!
1694  uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
1695                         HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
1696                         uint32_t** start_fpr)
1697      REQUIRES_SHARED(Locks::mutator_lock_) {
1698    Walk(shorty, shorty_len);
1699
1700    // JNI part.
1701    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
1702
1703    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
1704
1705    // Return the new bottom.
1706    return sp8;
1707  }
1708
1709  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
1710
1711  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1712  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
1713      REQUIRES_SHARED(Locks::mutator_lock_);
1714
1715 private:
1716  uint32_t num_handle_scope_references_;
1717  const bool critical_native_;
1718};
1719
1720uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1721  num_handle_scope_references_++;
1722  return reinterpret_cast<uintptr_t>(nullptr);
1723}
1724
1725void ComputeGenericJniFrameSize::WalkHeader(
1726    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1727  // First 2 parameters are always excluded for @CriticalNative.
1728  if (UNLIKELY(critical_native_)) {
1729    return;
1730  }
1731
1732  // JNIEnv
1733  sm->AdvancePointer(nullptr);
1734
1735  // Class object or this as first argument
1736  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1737}
1738
1739// Class to push values to three separate regions. Used to fill the native call part. Adheres to
1740// the template requirements of BuildNativeCallFrameStateMachine.
1741class FillNativeCall {
1742 public:
1743  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1744      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1745
1746  virtual ~FillNativeCall() {}
1747
1748  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1749    cur_gpr_reg_ = gpr_regs;
1750    cur_fpr_reg_ = fpr_regs;
1751    cur_stack_arg_ = stack_args;
1752  }
1753
1754  void PushGpr(uintptr_t val) {
1755    *cur_gpr_reg_ = val;
1756    cur_gpr_reg_++;
1757  }
1758
1759  void PushFpr4(float val) {
1760    *cur_fpr_reg_ = val;
1761    cur_fpr_reg_++;
1762  }
1763
1764  void PushFpr8(uint64_t val) {
1765    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1766    *tmp = val;
1767    cur_fpr_reg_ += 2;
1768  }
1769
1770  void PushStack(uintptr_t val) {
1771    *cur_stack_arg_ = val;
1772    cur_stack_arg_++;
1773  }
1774
1775  virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
1776    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1777    UNREACHABLE();
1778  }
1779
1780 private:
1781  uintptr_t* cur_gpr_reg_;
1782  uint32_t* cur_fpr_reg_;
1783  uintptr_t* cur_stack_arg_;
1784};
1785
1786// Visits arguments on the stack placing them into a region lower down the stack for the benefit
1787// of transitioning into native code.
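//
// Resulting layout, top of stack to bottom (a rough sketch; exact padding and alignment are
// computed by ComputeGenericJniFrameSize):
//   | RefsAndArgs callee-save frame  |
//   | HandleScope                    |
//   | ArtMethod* (new location)      |  <- *sp after the visitor has run
//   | cookie + padding               |
//   | native out-args stack          |
//   | FPR mini stack                 |
//   | GPR mini stack                 |  <- bottom_of_used_area_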
1788class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1789 public:
1790  BuildGenericJniFrameVisitor(Thread* self,
1791                              bool is_static,
1792                              bool critical_native,
1793                              const char* shorty,
1794                              uint32_t shorty_len,
1795                              ArtMethod*** sp)
1796     : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
1797       jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
1798       sm_(&jni_call_) {
1799    ComputeGenericJniFrameSize fsc(critical_native);
1800    uintptr_t* start_gpr_reg;
1801    uint32_t* start_fpr_reg;
1802    uintptr_t* start_stack_arg;
1803    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
1804                                             &handle_scope_,
1805                                             &start_stack_arg,
1806                                             &start_gpr_reg, &start_fpr_reg);
1807
1808    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
1809
1810    // First 2 parameters are always excluded for CriticalNative methods.
1811    if (LIKELY(!critical_native)) {
1812      // jni environment is always first argument
1813      sm_.AdvancePointer(self->GetJniEnv());
1814
1815      if (is_static) {
1816        sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
1817      }  // else "this" reference is already handled by QuickArgumentVisitor.
1818    }
1819  }
1820
1821  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
1822
1823  void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
1824
1825  StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
1826    return handle_scope_->GetHandle(0).GetReference();
1827  }
1828
1829  jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
1830    return handle_scope_->GetHandle(0).ToJObject();
1831  }
1832
1833  void* GetBottomOfUsedArea() const {
1834    return bottom_of_used_area_;
1835  }
1836
1837 private:
1838  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
1839  class FillJniCall FINAL : public FillNativeCall {
1840   public:
1841    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
1842                HandleScope* handle_scope, bool critical_native)
1843      : FillNativeCall(gpr_regs, fpr_regs, stack_args),
1844        handle_scope_(handle_scope),
1845        cur_entry_(0),
1846        critical_native_(critical_native) {}
1847
1848    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
1849
1850    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
1851      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
1852      handle_scope_ = scope;
1853      cur_entry_ = 0U;
1854    }
1855
1856    void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
1857      // Initialize padding entries.
1858      size_t expected_slots = handle_scope_->NumberOfReferences();
1859      while (cur_entry_ < expected_slots) {
1860        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
1861      }
1862
1863      if (!critical_native_) {
1864        // Non-critical natives have at least the declaring class (jclass) or this (jobject).
1865        DCHECK_NE(cur_entry_, 0U);
1866      }
1867    }
1868
1869    bool CriticalNative() const {
1870      return critical_native_;
1871    }
1872
1873   private:
1874    HandleScope* handle_scope_;
1875    size_t cur_entry_;
1876    const bool critical_native_;
1877  };
1878
1879  HandleScope* handle_scope_;
1880  FillJniCall jni_call_;
1881  void* bottom_of_used_area_;
1882
1883  BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1884
1885  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1886};
1887
1888uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
1889  uintptr_t tmp;
1890  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
1891  h.Assign(ref);
1892  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
1893  cur_entry_++;
1894  return tmp;
1895}
1896
1897void BuildGenericJniFrameVisitor::Visit() {
1898  Primitive::Type type = GetParamPrimitiveType();
1899  switch (type) {
1900    case Primitive::kPrimLong: {
1901      jlong long_arg;
1902      if (IsSplitLongOrDouble()) {
1903        long_arg = ReadSplitLongParam();
1904      } else {
1905        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1906      }
1907      sm_.AdvanceLong(long_arg);
1908      break;
1909    }
1910    case Primitive::kPrimDouble: {
1911      uint64_t double_arg;
1912      if (IsSplitLongOrDouble()) {
1913        // Read the raw bits so that we don't cast to a double.
1914        double_arg = ReadSplitLongParam();
1915      } else {
1916        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1917      }
1918      sm_.AdvanceDouble(double_arg);
1919      break;
1920    }
1921    case Primitive::kPrimNot: {
1922      StackReference<mirror::Object>* stack_ref =
1923          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1924      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1925      break;
1926    }
1927    case Primitive::kPrimFloat:
1928      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1929      break;
1930    case Primitive::kPrimBoolean:  // Fall-through.
1931    case Primitive::kPrimByte:     // Fall-through.
1932    case Primitive::kPrimChar:     // Fall-through.
1933    case Primitive::kPrimShort:    // Fall-through.
1934    case Primitive::kPrimInt:      // Fall-through.
1935      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1936      break;
1937    case Primitive::kPrimVoid:
1938      LOG(FATAL) << "UNREACHABLE";
1939      UNREACHABLE();
1940  }
1941}
1942
1943void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1944  // Clear out rest of the scope.
1945  jni_call_.ResetRemainingScopeSlots();
1946  if (!jni_call_.CriticalNative()) {
1947    // Install HandleScope.
1948    self->PushHandleScope(handle_scope_);
1949  }
1950}
1951
1952#if defined(__arm__) || defined(__aarch64__)
1953extern "C" void* artFindNativeMethod();
1954#else
1955extern "C" void* artFindNativeMethod(Thread* self);
1956#endif
1957
1958static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
1959                                            uint32_t cookie,
1960                                            bool fast_native ATTRIBUTE_UNUSED,
1961                                            jobject l,
1962                                            jobject lock) {
1963  // TODO: add entrypoints for @FastNative returning objects.
1964  if (lock != nullptr) {
1965    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1966  } else {
1967    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1968  }
1969}
1970
1971static void artQuickGenericJniEndJNINonRef(Thread* self,
1972                                           uint32_t cookie,
1973                                           bool fast_native,
1974                                           jobject lock) {
1975  if (lock != nullptr) {
1976    JniMethodEndSynchronized(cookie, lock, self);
1977    // Ignore "fast_native" here because synchronized functions aren't very fast.
1978  } else {
1979    if (UNLIKELY(fast_native)) {
1980      JniMethodFastEnd(cookie, self);
1981    } else {
1982      JniMethodEnd(cookie, self);
1983    }
1984  }
1985}
1986
1987/*
1988 * Initializes an alloca region assumed to be directly below sp for a native call:
1989 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
1990 * The final element on the stack is a pointer to the native code.
1991 *
1992 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1993 * We need to fix this, as the handle scope needs to go into the callee-save frame.
1994 *
1995 * The return of this function denotes:
1996 * 1) On success, the bottom of the used alloca area (hi) and the native code to call (lo).
1997 * 2) On error, the failure value (a zero pair), with an exception pending on the thread.
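 *
 * For reference, success is packed exactly as at the bottom of this function:
 *   GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),  // hi
 *                          reinterpret_cast<uintptr_t>(nativeCode));                    // lo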
1998 */
1999extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
2000    REQUIRES_SHARED(Locks::mutator_lock_) {
2001  ArtMethod* called = *sp;
2002  DCHECK(called->IsNative()) << PrettyMethod(called, true);
2003  uint32_t shorty_len = 0;
2004  const char* shorty = called->GetShorty(&shorty_len);
2005  bool critical_native = called->IsAnnotatedWithCriticalNative();
2006  bool fast_native = called->IsAnnotatedWithFastNative();
2007  bool normal_native = !critical_native && !fast_native;
2008
2009  // Run the visitor and update sp.
2010  BuildGenericJniFrameVisitor visitor(self,
2011                                      called->IsStatic(),
2012                                      critical_native,
2013                                      shorty,
2014                                      shorty_len,
2015                                      &sp);
2016  {
2017    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
2018    visitor.VisitArguments();
2019    // FinalizeHandleScope pushes the handle scope on the thread.
2020    visitor.FinalizeHandleScope(self);
2021  }
2022
2023  // Fix up managed-stack things in Thread.
2024  self->SetTopOfStack(sp);
2025
2026  self->VerifyStack();
2027
2028  uint32_t cookie;
2029  uint32_t* sp32;
2030  // Skip calling JniMethodStart for @CriticalNative.
2031  if (LIKELY(!critical_native)) {
2032    // Start JNI, save the cookie.
2033    if (called->IsSynchronized()) {
2034      DCHECK(normal_native) << " @FastNative and synchronized are not supported together";
2035      cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
2036      if (self->IsExceptionPending()) {
2037        self->PopHandleScope();
2038        // The failure value (a zero pair) denotes an error.
2039        return GetTwoWordFailureValue();
2040      }
2041    } else {
2042      if (fast_native) {
2043        cookie = JniMethodFastStart(self);
2044      } else {
2045        DCHECK(normal_native);
2046        cookie = JniMethodStart(self);
2047      }
2048    }
2049    sp32 = reinterpret_cast<uint32_t*>(sp);
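    // Save the cookie in the slot reserved just below the method pointer (see LayoutCookie); it is
    // reloaded from there in artQuickGenericJniEndTrampoline.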
2050    *(sp32 - 1) = cookie;
2051  }
2052
2053  // Retrieve the stored native code.
2054  void* nativeCode = called->GetEntryPointFromJni();
2055
2056  // There are two cases for the content of nativeCode:
2057  // 1) Pointer to the native function.
2058  // 2) Pointer to the trampoline for native code binding.
2059  // In the second case, we need to execute the binding and continue with the actual native function
2060  // pointer.
2061  DCHECK(nativeCode != nullptr);
2062  if (nativeCode == GetJniDlsymLookupStub()) {
2063#if defined(__arm__) || defined(__aarch64__)
2064    nativeCode = artFindNativeMethod();
2065#else
2066    nativeCode = artFindNativeMethod(self);
2067#endif
2068
2069    if (nativeCode == nullptr) {
2070      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
2071
2072      // @CriticalNative calls do not need to call back into JniMethodEnd.
2073      if (LIKELY(!critical_native)) {
2074        // End JNI, as the assembly will move to deliver the exception.
2075        jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
2076        if (shorty[0] == 'L') {
2077          artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock);
2078        } else {
2079          artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock);
2080        }
2081      }
2082
2083      return GetTwoWordFailureValue();
2084    }
2085    // Note that the native code pointer will be automatically set by artFindNativeMethod().
2086  }
2087
2088  // Return native code addr(lo) and bottom of alloca address(hi).
2089  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
2090                                reinterpret_cast<uintptr_t>(nativeCode));
2091}
2092
2093// Defined in quick_jni_entrypoints.cc.
2094extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
2095                                    jvalue result, uint64_t result_f, ArtMethod* called,
2096                                    HandleScope* handle_scope);
2097/*
2098 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
2099 * unlocking.
2100 */
2101extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2102                                                    jvalue result,
2103                                                    uint64_t result_f) {
2104  // We're here just back from a native call. We don't hold the shared mutator lock at this point
2105  // until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing
2106  // anything that requires the mutator lock before that would cause problems, as the GC may hold
2107  // the exclusive mutator lock and may be moving objects, etc.
2108  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2109  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2110  ArtMethod* called = *sp;
2111  uint32_t cookie = *(sp32 - 1);
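  // The HandleScope sits directly above the ArtMethod* slot, where LayoutCalleeSaveFrame placed it.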
2112  HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
2113  return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
2114}
2115
2116// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2117// for the method pointer.
2118//
2119// It is valid to use this, as at the usage points here (returns from C functions) we are assumed
2120// to hold the mutator lock (see the REQUIRES_SHARED(Locks::mutator_lock_) annotations).
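//
// For example (as done at the bottom of artInvokeCommon below):
//   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),     // hi: code entrypoint
//                                 reinterpret_cast<uintptr_t>(method));  // lo: ArtMethod*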
2121
2122template<InvokeType type, bool access_check>
2123static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
2124                                     ArtMethod** sp) {
2125  ScopedQuickEntrypointChecks sqec(self);
2126  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
2127  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2128  ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
2129  if (UNLIKELY(method == nullptr)) {
2130    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
2131    uint32_t shorty_len;
2132    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
2133    {
2134      // Remember the args in case a GC happens in FindMethodFromCode.
2135      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2136      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
2137      visitor.VisitArguments();
2138      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
2139                                                      self);
2140      visitor.FixupReferences();
2141    }
2142
2143    if (UNLIKELY(method == nullptr)) {
2144      CHECK(self->IsExceptionPending());
2145      return GetTwoWordFailureValue();  // Failure.
2146    }
2147  }
2148  DCHECK(!self->IsExceptionPending());
2149  const void* code = method->GetEntryPointFromQuickCompiledCode();
2150
2151  // When we return, the caller will branch to this address, so it had better not be 0!
2152  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
2153                          << " location: "
2154                          << method->GetDexFile()->GetLocation();
2155
2156  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2157                                reinterpret_cast<uintptr_t>(method));
2158}
2159
2160// Explicit artInvokeCommon template function declarations to please analysis tool.
2161#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
2162  template REQUIRES_SHARED(Locks::mutator_lock_)                                          \
2163  TwoWordReturn artInvokeCommon<type, access_check>(                                            \
2164      uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2165
2166EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
2167EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
2168EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
2169EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
2170EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
2171EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
2172EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
2173EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
2174EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
2175EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
2176#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2177
2178// See comments in runtime_support_asm.S
2179extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2180    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2181    REQUIRES_SHARED(Locks::mutator_lock_) {
2182  return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
2183}
2184
2185extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2186    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2187    REQUIRES_SHARED(Locks::mutator_lock_) {
2188  return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
2189}
2190
2191extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2192    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2193    REQUIRES_SHARED(Locks::mutator_lock_) {
2194  return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
2195}
2196
2197extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2198    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2199    REQUIRES_SHARED(Locks::mutator_lock_) {
2200  return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
2201}
2202
2203extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2204    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2205    REQUIRES_SHARED(Locks::mutator_lock_) {
2206  return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
2207}
2208
2209// Determine target of interface dispatch. This object is known non-null. First argument
2210// is there for consistency but should not be used, as some architectures overwrite it
2211// in the assembly trampoline.
2212extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED,
2213                                                      mirror::Object* this_object,
2214                                                      Thread* self,
2215                                                      ArtMethod** sp)
2216    REQUIRES_SHARED(Locks::mutator_lock_) {
2217  ScopedQuickEntrypointChecks sqec(self);
2218  StackHandleScope<1> hs(self);
2219  Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
2220
2221  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2222
2223  // Fetch the dex_method_idx of the target interface method from the caller.
2224  uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2225
2226  const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
2227  CHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
2228  const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
2229  Instruction::Code instr_code = instr->Opcode();
2230  CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2231        instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2232      << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
2233  uint32_t dex_method_idx;
2234  if (instr_code == Instruction::INVOKE_INTERFACE) {
2235    dex_method_idx = instr->VRegB_35c();
2236  } else {
2237    CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2238    dex_method_idx = instr->VRegB_3rc();
2239  }
2240
2241  ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
2242      dex_method_idx, kRuntimePointerSize);
2243  DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
2244  ArtMethod* method = nullptr;
2245  ImTable* imt = cls->GetImt(kRuntimePointerSize);
2246
2247  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
2248    // If the dex cache already resolved the interface method, look whether we have
2249    // a match in the ImtConflictTable.
2250    ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
2251                                          kRuntimePointerSize);
2252    if (LIKELY(conflict_method->IsRuntimeMethod())) {
2253      ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
2254      DCHECK(current_table != nullptr);
2255      method = current_table->Lookup(interface_method, kRuntimePointerSize);
2256    } else {
2257      // It seems we aren't really a conflict method!
2258      method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2259    }
2260    if (method != nullptr) {
2261      return GetTwoWordSuccessValue(
2262          reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2263          reinterpret_cast<uintptr_t>(method));
2264    }
2265
2266    // No match, use the IfTable.
2267    method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2268    if (UNLIKELY(method == nullptr)) {
2269      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2270          interface_method, this_object, caller_method);
2271      return GetTwoWordFailureValue();  // Failure.
2272    }
2273  } else {
2274    // The dex cache did not resolve the method, so look it up in the dex file
2275    // of the caller.
2276    DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
2277    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
2278        ->GetDexFile();
2279    uint32_t shorty_len;
2280    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
2281                                                   &shorty_len);
2282    {
2283      // Remember the args in case a GC happens in FindMethodFromCode.
2284      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2285      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
2286      visitor.VisitArguments();
2287      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
2288                                                     self);
2289      visitor.FixupReferences();
2290    }
2291
2292    if (UNLIKELY(method == nullptr)) {
2293      CHECK(self->IsExceptionPending());
2294      return GetTwoWordFailureValue();  // Failure.
2295    }
2296    interface_method =
2297        caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize);
2298    DCHECK(!interface_method->IsRuntimeMethod());
2299  }
2300
2301  // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
2302  // We create a new table with the new pair { interface_method, method }.
2303  uint32_t imt_index = ImTable::GetImtIndex(interface_method);
2304  ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
2305  if (conflict_method->IsRuntimeMethod()) {
2306    ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
2307        cls.Get(),
2308        conflict_method,
2309        interface_method,
2310        method,
2311        /*force_new_conflict_method*/false);
2312    if (new_conflict_method != conflict_method) {
2313      // Update the IMT if we create a new conflict method. No fence needed here, as the
2314      // data is consistent.
2315      imt->Set(imt_index,
2316               new_conflict_method,
2317               kRuntimePointerSize);
2318    }
2319  }
2320
2321  const void* code = method->GetEntryPointFromQuickCompiledCode();
2322
2323  // When we return, the caller will branch to this address, so it had better not be 0!
2324  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
2325                          << " location: " << method->GetDexFile()->GetLocation();
2326
2327  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2328                                reinterpret_cast<uintptr_t>(method));
2329}
2330
2331}  // namespace art
2332