quick_trampoline_entrypoints.cc revision 6a3c1fcb4ba42ad4d5d142c17a3712a6ddd3866f
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "instruction_set.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // |  :         |
  // | X20        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // |  :         |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif
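
  // Illustrative note (added commentary, not in the original source): GprIndexToGprOffset() maps
  // a logical GPR argument index to its spill slot. On x86-64 the argument registers are spilled
  // out of argument order, hence the switch above: gpr_index 0 (arg1, passed in RSI) maps to
  // slot 4 because RCX, RDX, RBX and RBP occupy slots 0-3 of the GPR save area (see the frame
  // diagram). On the other architectures the spill order matches the argument order, so the
  // mapping is a plain multiply by the spill-slot size.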

 public:
  static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
          is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
          gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
          fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
          stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
              + sizeof(StackReference<mirror::ArtMethod>)),  // Skip StackReference<ArtMethod>.
          gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
          cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    COMPILE_ASSERT(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), knum_of_quick_fpr_arg_unexpected);
    COMPILE_ASSERT(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
        kdouble_align_unexpected);
    // For register alignment, we want to assume that counters (fpr_double_index_) are even iff
    // the next register is even.
    COMPILE_ASSERT(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
        knum_quick_fpr_args_not_even);
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    // Read low half from register.
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    // Read high half from the stack. As current stack_index_ indexes the argument, the high part
    // index should be (stack_index_ + 1).
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_
        + (stack_index_ + 1) * kBytesStackArgLocation);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }
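
  // Worked example (illustrative commentary; assumes 32-bit ARM soft-float, where
  // kNumQuickGprArgs == 3): for a static method with shorty "VIIJ", the two ints consume r1 and
  // r2, so the long is visited with gpr_index_ == 2 and (gpr_index_ + 1) == kNumQuickGprArgs,
  // making it split: the low half is read from the last GPR via GetParamAddress() and the high
  // half from the caller's out area at (stack_index_ + 1), exactly as ReadSplitLongParam()
  // above reassembles it.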

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the first of the method's arguments, and
    // (b) whatever the argument type, 'stack_index_' should be advanced on every
    //     visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle the implicit "this" argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              fpr_index_++;
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              fpr_index_++;
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  fpr_index_++;
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }
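
  // Trace (illustrative commentary; assumes arm64 hard-float): for an instance method with
  // shorty "VFJ", VisitArguments() first visits the implicit "this" (GPR arg 0, i.e. x1), then
  // the float (FPR arg 0, i.e. d0), then the long (GPR arg 1, i.e. x2); stack_index_ advances
  // by 1, 1 and 2 respectively, since kBytesStackArgLocation is 4 and a long spans two 4-byte
  // out slots.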

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
  // terms of singles, may be behind fpr_index.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                               const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
                               size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
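      // A long or double occupies two vregs; combined with the shared increment at the end of
      // Visit(), cur_reg_ is advanced by two in total.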
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    // No last shadow coming from quick.
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    uint32_t shorty_len = 0;
    const char* shorty = method->GetShorty(&shorty_len);
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    StackHandleScope<1> hs(self);
    MethodHelper mh(hs.NewHandle(method));
    if (mh.Get()->IsStatic() && !mh.Get()->GetDeclaringClass()->IsInitialized()) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs2(self);
      Handle<mirror::Class> h_class(hs2.NewHandle(mh.Get()->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(mh.Get());
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }
    JValue result = interpreter::EnterInterpreterFromEntryPoint(self, &mh, code_item, shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}

// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                            const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
                                       << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                               const char* shorty, uint32_t shorty_len,
                               ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                    mirror::Object* receiver,
                                                    Thread* self,
                                                    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  // Start new JNI local reference state
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs)
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  const DexFile* dex_file;
  uint32_t dex_method_idx;
  if (called->IsRuntimeMethod()) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    dex_file = caller->GetDexFile();
    code = caller->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
        // Avoid used uninitialized warnings.
        invoke_type = kDirect;
        is_range = false;
    }
    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
  } else {
    invoke_type = kStatic;
    dex_file = called->GetDexFile();
    dex_method_idx = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (UNLIKELY(called->IsRuntimeMethod())) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type);
  }
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      mirror::ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
                               << PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->HasSameDexCacheResolvedMethods(caller)) {
        caller->SetDexCacheResolvedMethod(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK_EQ(caller->GetDexFile(), dex_file);
        StackHandleScope<1> hs(self);
        MethodHelper mh(hs.NewHandle(called));
        uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->SetDexCacheResolvedMethod(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(soa.Self(), called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  sp->Assign(called);
  return code;
}

/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float architecture, ARM, is 32b, so no widening needs to be taken into account
 *    for floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t):   Add a value for the next GPR
 *
 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Is only called if we need
 *                            padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job to
 *                            split this if necessary. The current state will have aligned, if
 *                            necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
 *                                          called with nullptr, as null initialization might be
 *                                          important. Must return the jobject, that is, the
 *                                          reference to the entry in the HandleScope (nullptr if
 *                                          necessary).
 *
 */
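// A minimal sketch of a conforming delegate (illustrative commentary only; ExampleDelegate is a
// hypothetical name, and ComputeNativeCallFrameSize below is the real in-tree delegate):
//
//   class ExampleDelegate {
//    public:
//     void PushGpr(uintptr_t val) { /* record the next GPR value */ }
//     void PushFpr4(float val) { /* record a 32b FPR value */ }
//     void PushFpr8(uint64_t val) { /* record a 64b FPR value */ }
//     void PushStack(uintptr_t val) { /* record the next stack slot */ }
//     uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
//       // Record ref (possibly nullptr) and return the address of its HandleScope entry.
//       return reinterpret_cast<uintptr_t>(nullptr);
//     }
//   };
//
// which would then be driven as BuildNativeCallFrameStateMachine<ExampleDelegate>.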
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
    // the next register is even; counting down is just to make the compiler happy...
    COMPILE_ASSERT(kNumNativeGprArgs % 2 == 0U, knum_native_gpr_args_not_even);
    COMPILE_ASSERT(kNumNativeFprArgs % 2 == 0U, knum_native_fpr_args_not_even);
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() const {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }

  bool HaveLongGpr() const {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs alignment
        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;            // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() const {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(bit_cast<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(bit_cast<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
        } else {
          PushStack(bit_cast<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }

  bool HaveDoubleFpr() const {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
        kAlignDoubleOnStack &&                  // and when it needs alignment
        (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
        kAlignDoubleOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;              // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t GetStackEntries() const {
    return stack_entries_;
  }

  uint32_t GetNumberOfUsedGprs() const {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t GetNumberOfUsedFprs() const {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs
  uint32_t fpr_index_;      // Number of free FPRs
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended
  T* const delegate_;             // What Push implementation gets called
};

// Computes the sizes of register stacks and call stack area. Handling of references can be extended
// in subclasses.
//
// To handle native pointers, use "L" in the shorty for an object reference, which simulates
// them with handles.
class ComputeNativeCallFrameSize {
 public:
  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}

  virtual ~ComputeNativeCallFrameSize() {}

  uint32_t GetStackSize() const {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  uint8_t* LayoutCallStack(uint8_t* sp8) const {
    sp8 -= GetStackSize();
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
      const {
    // Assumption is OK right now, as we have soft-float arm
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) const {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put fprs and gprs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    UNUSED(sm);
  }

  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          // TODO: fix abuse of mirror types.
          sm.AdvanceHandleScope(
              reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.GetStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr4(float /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr8(uint64_t /* val */) {
    // not optimizing registers, yet
  }

  void PushStack(uintptr_t /* val */) {
    // counting is already done in the superclass
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};
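
// A usage sketch (illustrative commentary only; the shorty is made up): sizing the native
// out-args area for a method with shorty "VIJL", i.e. (jint, jlong, jobject) returning void:
//
//   ComputeNativeCallFrameSize csm;
//   csm.Walk("VIJL", 4);                // Simulates the pushes without recording values.
//   size_t bytes = csm.GetStackSize();  // 0 on x86-64, since all three args fit in GPRs.
//
// Subclasses such as ComputeGenericJniFrameSize below override PushHandle() and WalkHeader()
// to also count HandleScope entries and the leading JNIEnv*/jclass arguments.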
1295
1296class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
1297 public:
1298  ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
1299
1300  // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
1301  // is at *m = sp. Will update to point to the bottom of the save frame.
1302  //
1303  // Note: assumes ComputeAll() has been run before.
1304  void LayoutCalleeSaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
1305                             HandleScope** handle_scope)
1306      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1307    mirror::ArtMethod* method = (*m)->AsMirrorPtr();
1308
1309    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
1310
1311    // First, fix up the layout of the callee-save frame.
1312    // We have to squeeze in the HandleScope, and relocate the method pointer.
1313
1314    // "Free" the slot for the method.
1315    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
1316
1317    // Below the callee saves, put the handle scope and the new method stack reference.
1318    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
1319    size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
1320
1321    sp8 -= scope_and_method;
1322    // Align by kStackAlignment.
1323    sp8 = reinterpret_cast<uint8_t*>(RoundDown(
1324        reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1325
1326    uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
1327    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
1328                                        num_handle_scope_references_);
1329
1330    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
1331    uint8_t* method_pointer = sp8;
1332    StackReference<mirror::ArtMethod>* new_method_ref =
1333        reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
1334    new_method_ref->Assign(method);
1335    *m = new_method_ref;
1336  }
1337
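  // The cookie is the JNI state value returned by JniMethodStart(); artQuickGenericJniTrampoline
  // below stores it into the slot reserved here.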
1338  // Adds space for the cookie. Note: may leave stack unaligned.
1339  void LayoutCookie(uint8_t** sp) const {
1340    // Reference cookie and padding
1341    *sp -= 8;
1342  }
1343
1344  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
1345  // Returns the new bottom. Note: this may be unaligned.
1346  uint8_t* LayoutJNISaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
1347                              HandleScope** handle_scope)
1348      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1349    // First, fix up the layout of the callee-save frame.
1350    // We have to squeeze in the HandleScope, and relocate the method pointer.
1351    LayoutCalleeSaveFrame(self, m, sp, handle_scope);
1352
1353    // The bottom of the callee-save frame is now where the method is, *m.
1354    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
1355
1356    // Add space for cookie.
1357    LayoutCookie(&sp8);
1358
1359    return sp8;
1360  }
1361
1362  // WARNING: After this, *m is updated and no longer points at the original method slot!
1363  uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
1364                         const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
1365                         uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
1366      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1367    Walk(shorty, shorty_len);
1368
1369    // JNI part.
1370    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
1371
1372    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
1373
1374    // Return the new bottom.
1375    return sp8;
1376  }
1377
1378  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
1379
1380  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1381  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
1382      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1383
1384 private:
1385  uint32_t num_handle_scope_references_;
1386};
1387
1388uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1389  num_handle_scope_references_++;
1390  return reinterpret_cast<uintptr_t>(nullptr);
1391}
1392
1393void ComputeGenericJniFrameSize::WalkHeader(
1394    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1395  // JNIEnv
1396  sm->AdvancePointer(nullptr);
1397
1398  // Class object or this as first argument
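  // (a dummy pointer: ComputeGenericJniFrameSize::PushHandle() only counts handle-scope slots
  // and never dereferences its argument)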
1399  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1400}
1401
1402// Class to push values to three separate regions. Used to fill the native call part. Adheres to
1403  // the template requirements of BuildNativeCallFrameStateMachine.
1404class FillNativeCall {
1405 public:
1406  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1407      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1408
1409  virtual ~FillNativeCall() {}
1410
1411  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1412    cur_gpr_reg_ = gpr_regs;
1413    cur_fpr_reg_ = fpr_regs;
1414    cur_stack_arg_ = stack_args;
1415  }
1416
1417  void PushGpr(uintptr_t val) {
1418    *cur_gpr_reg_ = val;
1419    cur_gpr_reg_++;
1420  }
1421
1422  void PushFpr4(float val) {
1423    *cur_fpr_reg_ = val;
1424    cur_fpr_reg_++;
1425  }
1426
1427  void PushFpr8(uint64_t val) {
1428    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1429    *tmp = val;
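    // A 64-bit value occupies two 32-bit FPR slots.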
1430    cur_fpr_reg_ += 2;
1431  }
1432
1433  void PushStack(uintptr_t val) {
1434    *cur_stack_arg_ = val;
1435    cur_stack_arg_++;
1436  }
1437
1438  virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1439    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1440    UNREACHABLE();
1441  }
1442
1443 private:
1444  uintptr_t* cur_gpr_reg_;
1445  uint32_t* cur_fpr_reg_;
1446  uintptr_t* cur_stack_arg_;
1447};
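
// Note: a FillNativeCall is constructed before the final frame layout is known and later
// re-pointed at the real register/stack regions via Reset() (see BuildGenericJniFrameVisitor).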
1448
1449// Visits arguments on the stack, placing them into a region lower down the stack for the benefit
1450// of transitioning into native code.
1451class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1452 public:
1453  BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
1454                              StackReference<mirror::ArtMethod>** sp)
1455     : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
1456       jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
1457    ComputeGenericJniFrameSize fsc;
1458    uintptr_t* start_gpr_reg;
1459    uint32_t* start_fpr_reg;
1460    uintptr_t* start_stack_arg;
1461    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
1462                                             &handle_scope_,
1463                                             &start_stack_arg,
1464                                             &start_gpr_reg, &start_fpr_reg);
1465
1466    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
1467
1468    // The JNI environment is always the first argument.
1469    sm_.AdvancePointer(self->GetJniEnv());
1470
1471    if (is_static) {
1472      sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
1473    }
1474  }
1475
1476  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
1477
1478  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1479
1480  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
1481      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1482    return handle_scope_->GetHandle(0).GetReference();
1483  }
1484
1485  jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1486    return handle_scope_->GetHandle(0).ToJObject();
1487  }
1488
1489  void* GetBottomOfUsedArea() const {
1490    return bottom_of_used_area_;
1491  }
1492
1493 private:
1494  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
1495  class FillJniCall FINAL : public FillNativeCall {
1496   public:
1497    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
1498                HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
1499                                             handle_scope_(handle_scope), cur_entry_(0) {}
1500
1501    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1502
1503    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
1504      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
1505      handle_scope_ = scope;
1506      cur_entry_ = 0U;
1507    }
1508
1509    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1510      // Initialize padding entries.
1511      size_t expected_slots = handle_scope_->NumberOfReferences();
1512      while (cur_entry_ < expected_slots) {
1513        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
1514      }
1515      DCHECK_NE(cur_entry_, 0U);
1516    }
1517
1518   private:
1519    HandleScope* handle_scope_;
1520    size_t cur_entry_;
1521  };
1522
1523  HandleScope* handle_scope_;
1524  FillJniCall jni_call_;
1525  void* bottom_of_used_area_;
1526
1527  BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1528
1529  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1530};
1531
1532uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
1533  uintptr_t tmp;
1534  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
1535  h.Assign(ref);
1536  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
1537  cur_entry_++;
1538  return tmp;
1539}
1540
1541void BuildGenericJniFrameVisitor::Visit() {
1542  Primitive::Type type = GetParamPrimitiveType();
1543  switch (type) {
1544    case Primitive::kPrimLong: {
1545      jlong long_arg;
1546      if (IsSplitLongOrDouble()) {
1547        long_arg = ReadSplitLongParam();
1548      } else {
1549        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1550      }
1551      sm_.AdvanceLong(long_arg);
1552      break;
1553    }
1554    case Primitive::kPrimDouble: {
1555      uint64_t double_arg;
1556      if (IsSplitLongOrDouble()) {
1557        // Read the raw bits so that we don't convert the value to a double.
1558        double_arg = ReadSplitLongParam();
1559      } else {
1560        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1561      }
1562      sm_.AdvanceDouble(double_arg);
1563      break;
1564    }
1565    case Primitive::kPrimNot: {
1566      StackReference<mirror::Object>* stack_ref =
1567          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1568      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1569      break;
1570    }
1571    case Primitive::kPrimFloat:
1572      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1573      break;
1574    case Primitive::kPrimBoolean:  // Fall-through.
1575    case Primitive::kPrimByte:     // Fall-through.
1576    case Primitive::kPrimChar:     // Fall-through.
1577    case Primitive::kPrimShort:    // Fall-through.
1578    case Primitive::kPrimInt:      // Fall-through.
1579      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1580      break;
1581    case Primitive::kPrimVoid:
1582      LOG(FATAL) << "UNREACHABLE";
1583      UNREACHABLE();
1584  }
1585}
1586
1587void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1588  // Clear out the rest of the scope.
1589  jni_call_.ResetRemainingScopeSlots();
1590  // Install HandleScope.
1591  self->PushHandleScope(handle_scope_);
1592}
1593
1594#if defined(__arm__) || defined(__aarch64__)
1595extern "C" void* artFindNativeMethod();
1596#else
1597extern "C" void* artFindNativeMethod(Thread* self);
1598#endif
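// Note: on ARM and ARM64 the lookup helper retrieves the current Thread itself (presumably via
// Thread::Current()), while the other architectures are passed it explicitly; the call below
// mirrors this split.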
1599
1600uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1601  if (lock != nullptr) {
1602    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1603  } else {
1604    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1605  }
1606}
1607
1608void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1609  if (lock != nullptr) {
1610    JniMethodEndSynchronized(cookie, lock, self);
1611  } else {
1612    JniMethodEnd(cookie, self);
1613  }
1614}
1615
1616/*
1617 * Initializes an alloca region assumed to be directly below sp for a native call:
1618 * Creates a HandleScope and a call stack, and fills a mini stack with values to be pushed to registers.
1619 * The final element on the stack is a pointer to the native code.
1620 *
1621 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1622 * We need to fix this, as the handle scope needs to go into the callee-save frame.
1623 *
1624 * The return value denotes:
1625 * 1) On success: the bottom of the used alloca area (hi) and the native code pointer (lo).
1626 * 2) On failure: GetTwoWordFailureValue(), with an exception pending on the thread.
1627 */
1628extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
1629                                                      StackReference<mirror::ArtMethod>* sp)
1630    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1631  mirror::ArtMethod* called = sp->AsMirrorPtr();
1632  DCHECK(called->IsNative()) << PrettyMethod(called, true);
1633  uint32_t shorty_len = 0;
1634  const char* shorty = called->GetShorty(&shorty_len);
1635
1636  // Run the visitor and update sp.
1637  BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
1638  visitor.VisitArguments();
1639  visitor.FinalizeHandleScope(self);
1640
1641  // Fix up managed-stack things in Thread.
1642  self->SetTopOfStack(sp);
1643
1644  self->VerifyStack();
1645
1646  // Start JNI, save the cookie.
1647  uint32_t cookie;
1648  if (called->IsSynchronized()) {
1649    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
1650    if (self->IsExceptionPending()) {
1651      self->PopHandleScope();
1652      // The failure value tells the stub to deliver the pending exception.
1653      return GetTwoWordFailureValue();
1654    }
1655  } else {
1656    cookie = JniMethodStart(self);
1657  }
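  // Save the cookie in the 32-bit slot directly below the method pointer;
  // artQuickGenericJniEndTrampoline reloads it from the same slot.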
1658  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1659  *(sp32 - 1) = cookie;
1660
1661  // Retrieve the stored native code.
1662  const void* nativeCode = called->GetNativeMethod();
1663
1664  // There are two cases for the content of nativeCode:
1665  // 1) Pointer to the native function.
1666  // 2) Pointer to the trampoline for native code binding.
1667  // In the second case, we need to execute the binding and continue with the actual native function
1668  // pointer.
1669  DCHECK(nativeCode != nullptr);
1670  if (nativeCode == GetJniDlsymLookupStub()) {
1671#if defined(__arm__) || defined(__aarch64__)
1672    nativeCode = artFindNativeMethod();
1673#else
1674    nativeCode = artFindNativeMethod(self);
1675#endif
1676
1677    if (nativeCode == nullptr) {
1678      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
1679
1680      // End JNI, as the assembly will move to deliver the exception.
1681      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
1682      if (shorty[0] == 'L') {
1683        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1684      } else {
1685        artQuickGenericJniEndJNINonRef(self, cookie, lock);
1686      }
1687
1688      return GetTwoWordFailureValue();
1689    }
1690    // Note that the native code pointer will be automatically set by artFindNativeMethod().
1691  }
1692
1693  // Return the native code address (lo) and the bottom of the alloca area (hi).
1694  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
1695                                reinterpret_cast<uintptr_t>(nativeCode));
1696}
1697
1698/*
1699 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
1700 * unlocking.
1701 */
1702extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
1703    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1704  StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame();
1705  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1706  mirror::ArtMethod* called = sp->AsMirrorPtr();
1707  uint32_t cookie = *(sp32 - 1);
1708
1709  jobject lock = nullptr;
1710  if (called->IsSynchronized()) {
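    // LayoutCalleeSaveFrame() placed the HandleScope directly above the method reference;
    // for a synchronized method its first entry holds the lock (jclass or this).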
1711    HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
1712        + sizeof(StackReference<mirror::ArtMethod>));
1713    lock = table->GetHandle(0).ToJObject();
1714  }
1715
1716  char return_shorty_char = called->GetShorty()[0];
1717
1718  if (return_shorty_char == 'L') {
1719    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
1720  } else {
1721    artQuickGenericJniEndJNINonRef(self, cookie, lock);
1722
1723    switch (return_shorty_char) {
1724      case 'F': {
1725        if (kRuntimeISA == kX86) {
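          // On x86 the float result arrives widened to a double in result_f (presumably due
          // to the x87 floating-point return convention); narrow it back to float and return
          // its raw 32-bit pattern.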
1726          // Convert back the result to float.
1727          double d = bit_cast<uint64_t, double>(result_f);
1728          return bit_cast<float, uint32_t>(static_cast<float>(d));
1729        } else {
1730          return result_f;
1731        }
1732      }
1733      case 'D':
1734        return result_f;
1735      case 'Z':
1736        return result.z;
1737      case 'B':
1738        return result.b;
1739      case 'C':
1740        return result.c;
1741      case 'S':
1742        return result.s;
1743      case 'I':
1744        return result.i;
1745      case 'J':
1746        return result.j;
1747      case 'V':
1748        return 0;
1749      default:
1750        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
1751        return 0;
1752    }
1753  }
1754}
1755
1756// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
1757// for the method pointer.
1758//
1759// It is valid to use this, as at the usage points here (returns from C functions) we assume
1760// that we hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
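// (A sketch of the underlying convention: 32-bit ISAs pack lo and hi into a single 64-bit
// value returned in a register pair, while 64-bit ISAs return two machine words; see the
// per-architecture definition of TwoWordReturn.)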
1761
1762template<InvokeType type, bool access_check>
1763static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1764                                     mirror::ArtMethod* caller_method,
1765                                     Thread* self, StackReference<mirror::ArtMethod>* sp);
1766
1767template<InvokeType type, bool access_check>
1768static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1769                                     mirror::ArtMethod* caller_method,
1770                                     Thread* self, StackReference<mirror::ArtMethod>* sp) {
1771  ScopedQuickEntrypointChecks sqec(self);
1772  DCHECK_EQ(sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
1773  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
1774                                             type);
1775  if (UNLIKELY(method == nullptr)) {
1776    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1777    uint32_t shorty_len;
1778    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
1779    {
1780      // Remember the args in case a GC happens in FindMethodFromCode.
1781      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1782      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
1783      visitor.VisitArguments();
1784      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
1785                                                      self);
1786      visitor.FixupReferences();
1787    }
1788
1789    if (UNLIKELY(method == nullptr)) {
1790      CHECK(self->IsExceptionPending());
1791      return GetTwoWordFailureValue();  // Failure.
1792    }
1793  }
1794  DCHECK(!self->IsExceptionPending());
1795  const void* code = method->GetEntryPointFromQuickCompiledCode();
1796
1797  // When we return, the caller will branch to this address, so it had better not be 0!
1798  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
1799                          << " location: "
1800                          << method->GetDexFile()->GetLocation();
1801
1802  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1803                                reinterpret_cast<uintptr_t>(method));
1804}
1805
1806// Explicit artInvokeCommon template function declarations to please the analysis tool.
1807#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
1808  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
1809  TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
1810                                                    mirror::Object* this_object,                \
1811                                                    mirror::ArtMethod* caller_method,           \
1812                                                    Thread* self,                               \
1813                                                    StackReference<mirror::ArtMethod>* sp)      \
1814
1815EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
1816EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
1817EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
1818EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
1819EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
1820EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
1821EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
1822EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
1823EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
1824EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
1825#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
1826
1827// See comments in runtime_support_asm.S
1828extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
1829    uint32_t method_idx, mirror::Object* this_object,
1830    mirror::ArtMethod* caller_method, Thread* self,
1831    StackReference<mirror::ArtMethod>* sp)
1832        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1833  return artInvokeCommon<kInterface, true>(method_idx, this_object,
1834                                           caller_method, self, sp);
1835}
1836
1837extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
1838    uint32_t method_idx, mirror::Object* this_object,
1839    mirror::ArtMethod* caller_method, Thread* self,
1840    StackReference<mirror::ArtMethod>* sp)
1841        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1842  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
1843                                        self, sp);
1844}
1845
1846extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
1847    uint32_t method_idx, mirror::Object* this_object,
1848    mirror::ArtMethod* caller_method, Thread* self,
1849    StackReference<mirror::ArtMethod>* sp)
1850        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1851  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
1852                                        self, sp);
1853}
1854
1855extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
1856    uint32_t method_idx, mirror::Object* this_object,
1857    mirror::ArtMethod* caller_method, Thread* self,
1858    StackReference<mirror::ArtMethod>* sp)
1859        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1860  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
1861                                       self, sp);
1862}
1863
1864extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
1865    uint32_t method_idx, mirror::Object* this_object,
1866    mirror::ArtMethod* caller_method, Thread* self,
1867    StackReference<mirror::ArtMethod>* sp)
1868        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1869  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
1870                                         self, sp);
1871}
1872
1873// Determine the target of an interface dispatch. this_object is known to be non-null.
1874extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
1875                                                      mirror::Object* this_object,
1876                                                      mirror::ArtMethod* caller_method,
1877                                                      Thread* self,
1878                                                      StackReference<mirror::ArtMethod>* sp)
1879    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1880  ScopedQuickEntrypointChecks sqec(self);
1881  mirror::ArtMethod* method;
1882  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
1883    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
1884    if (UNLIKELY(method == nullptr)) {
1885      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
1886                                                                 caller_method);
1887      return GetTwoWordFailureValue();  // Failure.
1888    }
1889  } else {
1890    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
1891
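    // The method index is not directly available here, so recover it by decoding the
    // invoke-interface instruction at the caller's return PC: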
1892    // Find the caller PC.
1893    constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs);
1894    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
1895
1896    // Map the caller PC to a dex PC.
1897    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
1898    const DexFile::CodeItem* code = caller_method->GetCodeItem();
1899    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
1900    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
1901    Instruction::Code instr_code = instr->Opcode();
1902    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
1903          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
1904        << "Unexpected call into interface trampoline: " << instr->DumpString(NULL);
1905    uint32_t dex_method_idx;
1906    if (instr_code == Instruction::INVOKE_INTERFACE) {
1907      dex_method_idx = instr->VRegB_35c();
1908    } else {
1909      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
1910      dex_method_idx = instr->VRegB_3rc();
1911    }
1912
1913    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1915    uint32_t shorty_len;
1916    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
1917                                                   &shorty_len);
1918    {
1919      // Remember the args in case a GC happens in FindMethodFromCode.
1920      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1921      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
1922      visitor.VisitArguments();
1923      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
1924                                                     self);
1925      visitor.FixupReferences();
1926    }
1927
1928    if (UNLIKELY(method == nullptr)) {
1929      CHECK(self->IsExceptionPending());
1930      return GetTwoWordFailureValue();  // Failure.
1931    }
1932  }
1933  const void* code = method->GetEntryPointFromQuickCompiledCode();
1934
1935  // When we return, the caller will branch to this address, so it had better not be 0!
1936  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
1937                          << " location: " << method->GetDexFile()->GetLocation();
1938
1939  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1940                                reinterpret_cast<uintptr_t>(method));
1941}
1942
1943}  // namespace art
1944