quick_trampoline_entrypoints.cc revision 6c5cb212fa7010ae7caf9dc765533aa967c95342
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "callee_save_frame.h"
18#include "common_throws.h"
19#include "dex_file-inl.h"
20#include "dex_instruction-inl.h"
21#include "entrypoints/entrypoint_utils.h"
22#include "gc/accounting/card_table-inl.h"
23#include "instruction_set.h"
24#include "interpreter/interpreter.h"
25#include "mirror/art_method-inl.h"
26#include "mirror/class-inl.h"
27#include "mirror/dex_cache-inl.h"
28#include "mirror/object-inl.h"
29#include "mirror/object_array-inl.h"
30#include "object_utils.h"
31#include "runtime.h"
32#include "scoped_thread_state_change.h"
33
34namespace art {
35
36// Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
37class QuickArgumentVisitor {
38  // Number of bytes for each out register in the caller method's frame.
39  static constexpr size_t kBytesStackArgLocation = 4;
40  // Frame size in bytes of a callee-save frame for RefsAndArgs.
41  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
42      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
43#if defined(__arm__)
44  // The callee save frame is pointed to by SP.
45  // | argN       |  |
46  // | ...        |  |
47  // | arg4       |  |
48  // | arg3 spill |  |  Caller's frame
49  // | arg2 spill |  |
50  // | arg1 spill |  |
51  // | Method*    | ---
52  // | LR         |
53  // | ...        |    callee saves
54  // | R3         |    arg3
55  // | R2         |    arg2
56  // | R1         |    arg1
57  // | R0         |    padding
58  // | Method*    |  <- sp
59  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
60  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
61  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
62  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
63  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
64  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
65  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
66    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
67  }
68#elif defined(__aarch64__)
69  // The callee save frame is pointed to by SP.
70  // | argN       |  |
71  // | ...        |  |
72  // | arg4       |  |
73  // | arg3 spill |  |  Caller's frame
74  // | arg2 spill |  |
75  // | arg1 spill |  |
76  // | Method*    | ---
77  // | LR         |
78  // | X28        |
79  // |  :         |
80  // | X19        |
81  // | X7         |
82  // | :          |
83  // | X1         |
84  // | D15        |
85  // |  :         |
86  // | D0         |
87  // |            |    padding
88  // | Method*    |  <- sp
89  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
90  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
91  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
92  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
93  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
94  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
95  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
96    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
97  }
98#elif defined(__mips__)
99  // The callee save frame is pointed to by SP.
100  // | argN       |  |
101  // | ...        |  |
102  // | arg4       |  |
103  // | arg3 spill |  |  Caller's frame
104  // | arg2 spill |  |
105  // | arg1 spill |  |
106  // | Method*    | ---
107  // | RA         |
108  // | ...        |    callee saves
109  // | A3         |    arg3
110  // | A2         |    arg2
111  // | A1         |    arg1
112  // | A0/Method* |  <- sp
113  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
114  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
115  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
116  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
117  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
118  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
119  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
120    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
121  }
122#elif defined(__i386__)
123  // The callee save frame is pointed to by SP.
124  // | argN        |  |
125  // | ...         |  |
126  // | arg4        |  |
127  // | arg3 spill  |  |  Caller's frame
128  // | arg2 spill  |  |
129  // | arg1 spill  |  |
130  // | Method*     | ---
131  // | Return      |
132  // | EBP,ESI,EDI |    callee saves
133  // | EBX         |    arg3
134  // | EDX         |    arg2
135  // | ECX         |    arg1
136  // | EAX/Method* |  <- sp
137  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
138  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
139  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
140  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
141  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
142  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
143  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
144    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
145  }
146#elif defined(__x86_64__)
147  // The callee save frame is pointed to by SP.
148  // | argN            |  |
149  // | ...             |  |
150  // | reg. arg spills |  |  Caller's frame
151  // | Method*         | ---
152  // | Return          |
153  // | R15             |    callee save
154  // | R14             |    callee save
155  // | R13             |    callee save
156  // | R12             |    callee save
157  // | R9              |    arg5
158  // | R8              |    arg4
159  // | RSI/R6          |    arg1
160  // | RBP/R5          |    callee save
161  // | RBX/R3          |    callee save
162  // | RDX/R2          |    arg2
163  // | RCX/R1          |    arg3
164  // | XMM7            |    float arg 8
165  // | XMM6            |    float arg 7
166  // | XMM5            |    float arg 6
167  // | XMM4            |    float arg 5
168  // | XMM3            |    float arg 4
169  // | XMM2            |    float arg 3
170  // | XMM1            |    float arg 2
171  // | XMM0            |    float arg 1
172  // | Padding         |
173  // | RDI/Method*     |  <- sp
174  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
175  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
176  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
177  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
178  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
179  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
180  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
181    switch (gpr_index) {
182      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
183      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
184      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
185      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
186      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
187      default:
188        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
189        return 0;
190    }
191  }
192#else
193#error "Unsupported architecture"
194#endif
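  // Illustration (worked example for ARM, derived from the diagram and constants above): Method*
  // sits at sp+0, the "R0" padding slot at sp+4, the argument registers R1-R3 start at offset 8
  // (Gpr1Offset), the remaining callee saves follow, and LR ends up at offset 44 (LrOffset);
  // ten 4-byte spill slots plus the Method* slot and padding give the 48-byte RefsAndArgs frame.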
195
196 public:
197  static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
198      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
199    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
200    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
201    return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
202  }
203
204  // For the given quick ref and args quick frame, return the caller's PC.
205  static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
206      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
207    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
208    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
209    return *reinterpret_cast<uintptr_t*>(lr);
210  }
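  // Both helpers depend only on the fixed RefsAndArgs layout: the caller's
  // StackReference<ArtMethod> sits exactly one frame size above sp and the return address sits at
  // LrOffset. On x86_64, for instance, that is sp + 176 for the caller's method slot (LrOffset of
  // 168 plus the 8-byte return address) and sp + 168 for the return PC.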
211
212  QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
213                       const char* shorty, uint32_t shorty_len)
214      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
215      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
216      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
217      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
218      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
219                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
220      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
221      is_split_long_or_double_(false) { }
222
223  virtual ~QuickArgumentVisitor() {}
224
225  virtual void Visit() = 0;
226
227  Primitive::Type GetParamPrimitiveType() const {
228    return cur_type_;
229  }
230
231  byte* GetParamAddress() const {
232    if (!kQuickSoftFloatAbi) {
233      Primitive::Type type = GetParamPrimitiveType();
234      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
235        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
236          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
237        }
238        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
239      }
240    }
241    if (gpr_index_ < kNumQuickGprArgs) {
242      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
243    }
244    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
245  }
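  // Example (hypothetical shorty "VJI", static method, 32-bit soft-float target): the long 'J'
  // occupies gpr_index_ 0 and 1, so when the int 'I' is visited gpr_index_ is 2 and, since
  // kNumQuickGprArgs is 3 there, GetParamAddress() returns
  //   gpr_args_ + GprIndexToGprOffset(2)
  // rather than falling through to stack_args_ + stack_index_ * kBytesStackArgLocation.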
246
247  bool IsSplitLongOrDouble() const {
248    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
249      return is_split_long_or_double_;
250    } else {
251      return false;  // An optimization for when GPR and FPRs are 64bit.
252    }
253  }
254
255  bool IsParamAReference() const {
256    return GetParamPrimitiveType() == Primitive::kPrimNot;
257  }
258
259  bool IsParamALongOrDouble() const {
260    Primitive::Type type = GetParamPrimitiveType();
261    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
262  }
263
264  uint64_t ReadSplitLongParam() const {
265    DCHECK(IsSplitLongOrDouble());
266    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
267    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
268    return (low_half & 0xffffffffULL) | (high_half << 32);
269  }
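  // Illustration: if the split long's low word (in the last GPR spill slot) is 0xdeadbeef and its
  // high word (the first stack argument slot) is 0x12345678, this reassembles 0x12345678deadbeef.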
270
271  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
272    // This implementation doesn't support reg-spill area for hard float
273    // ABI targets such as x86_64 and aarch64. So, for those targets whose
274    // 'kQuickSoftFloatAbi' is 'false':
275    //     (a) 'stack_args_' should point to the first method's argument
276    //     (b) whatever the argument type it is, the 'stack_index_' should
277    //         be moved forward along with every visiting.
278    gpr_index_ = 0;
279    fpr_index_ = 0;
280    stack_index_ = 0;
281    if (!is_static_) {  // Handle the "this" receiver argument.
282      cur_type_ = Primitive::kPrimNot;
283      is_split_long_or_double_ = false;
284      Visit();
285      if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
286        stack_index_++;
287      }
288      if (kNumQuickGprArgs > 0) {
289        gpr_index_++;
290      }
291    }
292    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
293      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
294      switch (cur_type_) {
295        case Primitive::kPrimNot:
296        case Primitive::kPrimBoolean:
297        case Primitive::kPrimByte:
298        case Primitive::kPrimChar:
299        case Primitive::kPrimShort:
300        case Primitive::kPrimInt:
301          is_split_long_or_double_ = false;
302          Visit();
303          if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
304            stack_index_++;
305          }
306          if (gpr_index_ < kNumQuickGprArgs) {
307            gpr_index_++;
308          }
309          break;
310        case Primitive::kPrimFloat:
311          is_split_long_or_double_ = false;
312          Visit();
313          if (kQuickSoftFloatAbi) {
314            if (gpr_index_ < kNumQuickGprArgs) {
315              gpr_index_++;
316            } else {
317              stack_index_++;
318            }
319          } else {
320            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
321              fpr_index_++;
322            }
323            stack_index_++;
324          }
325          break;
326        case Primitive::kPrimDouble:
327        case Primitive::kPrimLong:
328          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
329            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
330                ((gpr_index_ + 1) == kNumQuickGprArgs);
331            Visit();
332            if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
333              if (kBytesStackArgLocation == 4) {
334                stack_index_+= 2;
335              } else {
336                CHECK_EQ(kBytesStackArgLocation, 8U);
337                stack_index_++;
338              }
339            }
340            if (gpr_index_ < kNumQuickGprArgs) {
341              gpr_index_++;
342              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
343                if (gpr_index_ < kNumQuickGprArgs) {
344                  gpr_index_++;
345                } else if (kQuickSoftFloatAbi) {
346                  stack_index_++;
347                }
348              }
349            }
350          } else {
351            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
352                ((fpr_index_ + 1) == kNumQuickFprArgs);
353            Visit();
354            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
355              fpr_index_++;
356              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
357                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
358                  fpr_index_++;
359                }
360              }
361            }
362            if (kBytesStackArgLocation == 4) {
363              stack_index_+= 2;
364            } else {
365              CHECK_EQ(kBytesStackArgLocation, 8U);
366              stack_index_++;
367            }
368          }
369          break;
370        default:
371          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
372      }
373    }
374  }
375
376 private:
377  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
378                                             uint32_t shorty_len) {
379    if (kQuickSoftFloatAbi) {
380      CHECK_EQ(kNumQuickFprArgs, 0U);
381      return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
382          + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
383    } else {
384      // For now, there is no reg-spill area for the targets with
385      // hard float ABI. So, the offset pointing to the first method's
386      // parameter ('this' for non-static methods) should be returned.
387      return sizeof(StackReference<mirror::ArtMethod>);  // Skip StackReference<ArtMethod>.
388    }
389  }
390
391  const bool is_static_;
392  const char* const shorty_;
393  const uint32_t shorty_len_;
394  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
395  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
396  byte* const stack_args_;  // Address of stack arguments in caller's frame.
397  uint32_t gpr_index_;  // Index into spilled GPRs.
398  uint32_t fpr_index_;  // Index into spilled FPRs.
399  uint32_t stack_index_;  // Index into arguments on the stack.
400  // The current type of argument during VisitArguments.
401  Primitive::Type cur_type_;
402  // Does a 64bit parameter straddle the register and stack arguments?
403  bool is_split_long_or_double_;
404};
405
406// Visits arguments on the stack placing them into the shadow frame.
407class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
408 public:
409  BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
410                               const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
411                               size_t first_arg_reg) :
412    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
413
414  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
415
416 private:
417  ShadowFrame* const sf_;
418  uint32_t cur_reg_;
419
420  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
421};
422
423void BuildQuickShadowFrameVisitor::Visit()  {
424  Primitive::Type type = GetParamPrimitiveType();
425  switch (type) {
426    case Primitive::kPrimLong:  // Fall-through.
427    case Primitive::kPrimDouble:
428      if (IsSplitLongOrDouble()) {
429        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
430      } else {
431        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
432      }
433      ++cur_reg_;
434      break;
435    case Primitive::kPrimNot: {
436        StackReference<mirror::Object>* stack_ref =
437            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
438        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
439      }
440      break;
441    case Primitive::kPrimBoolean:  // Fall-through.
442    case Primitive::kPrimByte:     // Fall-through.
443    case Primitive::kPrimChar:     // Fall-through.
444    case Primitive::kPrimShort:    // Fall-through.
445    case Primitive::kPrimInt:      // Fall-through.
446    case Primitive::kPrimFloat:
447      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
448      break;
449    case Primitive::kPrimVoid:
450      LOG(FATAL) << "UNREACHABLE";
451      break;
452  }
453  ++cur_reg_;
454}
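// Illustration: the interpreter expects incoming arguments in the highest vregs of the frame, so
// with registers_size_ == 5 and ins_size_ == 3 the bridge below passes first_arg_reg == 2 and the
// receiver plus two arguments land in v2..v4; longs and doubles take two vregs, hence the extra
// ++cur_reg_ in the kPrimLong/kPrimDouble case above.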
455
456extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
457                                                StackReference<mirror::ArtMethod>* sp)
458    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
459  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
460  // frame.
461  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
462
463  if (method->IsAbstract()) {
464    ThrowAbstractMethodError(method);
465    return 0;
466  } else {
467    DCHECK(!method->IsNative()) << PrettyMethod(method);
468    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
469    const DexFile::CodeItem* code_item = method->GetCodeItem();
470    DCHECK(code_item != nullptr) << PrettyMethod(method);
471    uint16_t num_regs = code_item->registers_size_;
472    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
473    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
474                                                  method, 0, memory));
475    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
476    uint32_t shorty_len = 0;
477    const char* shorty = method->GetShorty(&shorty_len);
478    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
479                                                      shadow_frame, first_arg_reg);
480    shadow_frame_builder.VisitArguments();
481    // Push a transition back into managed code onto the linked list in thread.
482    ManagedStack fragment;
483    self->PushManagedStackFragment(&fragment);
484    self->PushShadowFrame(shadow_frame);
485    self->EndAssertNoThreadSuspension(old_cause);
486
487    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
488      // Ensure static method's class is initialized.
489      StackHandleScope<1> hs(self);
490      Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
491      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
492        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
493        self->PopManagedStackFragment(fragment);
494        return 0;
495      }
496    }
497
498    StackHandleScope<1> hs(self);
499    MethodHelper mh(hs.NewHandle(method));
500    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
501    // Pop transition.
502    self->PopManagedStackFragment(fragment);
503    // No need to restore the args since the method has already been run by the interpreter.
504    return result.GetJ();
505  }
506}
507
508// Visits arguments on the stack, placing them into the args vector; Object* arguments are converted
509// to jobjects.
510class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
511 public:
512  BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
513                            const char* shorty, uint32_t shorty_len,
514                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
515    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
516
517  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
518
519  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
520
521 private:
522  ScopedObjectAccessUnchecked* const soa_;
523  std::vector<jvalue>* const args_;
524  // References which we must update when exiting in case the GC moved the objects.
525  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
526
527  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
528};
529
530void BuildQuickArgumentVisitor::Visit() {
531  jvalue val;
532  Primitive::Type type = GetParamPrimitiveType();
533  switch (type) {
534    case Primitive::kPrimNot: {
535      StackReference<mirror::Object>* stack_ref =
536          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
537      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
538      references_.push_back(std::make_pair(val.l, stack_ref));
539      break;
540    }
541    case Primitive::kPrimLong:  // Fall-through.
542    case Primitive::kPrimDouble:
543      if (IsSplitLongOrDouble()) {
544        val.j = ReadSplitLongParam();
545      } else {
546        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
547      }
548      break;
549    case Primitive::kPrimBoolean:  // Fall-through.
550    case Primitive::kPrimByte:     // Fall-through.
551    case Primitive::kPrimChar:     // Fall-through.
552    case Primitive::kPrimShort:    // Fall-through.
553    case Primitive::kPrimInt:      // Fall-through.
554    case Primitive::kPrimFloat:
555      val.i = *reinterpret_cast<jint*>(GetParamAddress());
556      break;
557    case Primitive::kPrimVoid:
558      LOG(FATAL) << "UNREACHABLE";
559      val.j = 0;
560      break;
561  }
562  args_->push_back(val);
563}
564
565void BuildQuickArgumentVisitor::FixupReferences() {
566  // Fixup any references which may have changed.
567  for (const auto& pair : references_) {
568    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
569    soa_->Env()->DeleteLocalRef(pair.first);
570  }
571}
572
573// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
574// which is responsible for recording callee save registers. We explicitly place into jobjects the
575// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
576// field within the proxy object, which will box the primitive arguments and deal with error cases.
577extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
578                                               mirror::Object* receiver,
579                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
580    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
581  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
582  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
583  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
584  const char* old_cause =
585      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
586  // Register the top of the managed stack, making stack crawlable.
587  DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
588  self->SetTopOfStack(sp, 0);
589  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
590            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
591      << PrettyMethod(proxy_method);
592  self->VerifyStack();
593  // Start new JNI local reference state.
594  JNIEnvExt* env = self->GetJniEnv();
595  ScopedObjectAccessUnchecked soa(env);
596  ScopedJniEnvLocalRefState env_state(env);
597  // Create a local ref. copy of the receiver.
598  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
599
600  // Place the arguments into the args vector and remove the receiver.
601  mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
602  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
603      << PrettyMethod(non_proxy_method);
604  std::vector<jvalue> args;
605  uint32_t shorty_len = 0;
606  const char* shorty = proxy_method->GetShorty(&shorty_len);
607  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
608
609  local_ref_visitor.VisitArguments();
610  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
611  args.erase(args.begin());
612
613  // Convert proxy method into expected interface method.
614  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
615  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
616  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
617  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
618
619  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
620  // that performs allocations.
621  self->EndAssertNoThreadSuspension(old_cause);
622  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
623  // Restore references which might have moved.
624  local_ref_visitor.FixupReferences();
625  return result.GetJ();
626}
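// Note on the downstream path (sketch): InvokeProxyInvocationHandler boxes the primitive jvalues
// and dispatches to the proxy's InvocationHandler.invoke() with the receiver, the interface
// method and the boxed arguments; the raw bits of the resulting JValue are what this trampoline
// hands back to the stub via result.GetJ().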
627
628// Read object references held in arguments from quick frames and place them in JNI local references,
629// so they don't get garbage collected.
630class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
631 public:
632  RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
633                               const char* shorty, uint32_t shorty_len,
634                               ScopedObjectAccessUnchecked* soa) :
635    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
636
637  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
638
639  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
640
641 private:
642  ScopedObjectAccessUnchecked* const soa_;
643  // References which we must update when exiting in case the GC moved the objects.
644  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
645  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
646};
647
648void RememberForGcArgumentVisitor::Visit() {
649  if (IsParamAReference()) {
650    StackReference<mirror::Object>* stack_ref =
651        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
652    jobject reference =
653        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
654    references_.push_back(std::make_pair(reference, stack_ref));
655  }
656}
657
658void RememberForGcArgumentVisitor::FixupReferences() {
659  // Fixup any references which may have changed.
660  for (const auto& pair : references_) {
661    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
662    soa_->Env()->DeleteLocalRef(pair.first);
663  }
664}
665
666
667// Lazily resolve a method for quick. Called by stub code.
668extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
669                                                    mirror::Object* receiver,
670                                                    Thread* self,
671                                                    StackReference<mirror::ArtMethod>* sp)
672    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
673  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
674  // Start new JNI local reference state
675  JNIEnvExt* env = self->GetJniEnv();
676  ScopedObjectAccessUnchecked soa(env);
677  ScopedJniEnvLocalRefState env_state(env);
678  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
679
680  // Compute details about the called method (avoid GCs)
681  ClassLinker* linker = Runtime::Current()->GetClassLinker();
682  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
683  InvokeType invoke_type;
684  const DexFile* dex_file;
685  uint32_t dex_method_idx;
686  if (called->IsRuntimeMethod()) {
687    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
688    const DexFile::CodeItem* code;
689    dex_file = caller->GetDexFile();
690    code = caller->GetCodeItem();
691    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
692    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
693    Instruction::Code instr_code = instr->Opcode();
694    bool is_range;
695    switch (instr_code) {
696      case Instruction::INVOKE_DIRECT:
697        invoke_type = kDirect;
698        is_range = false;
699        break;
700      case Instruction::INVOKE_DIRECT_RANGE:
701        invoke_type = kDirect;
702        is_range = true;
703        break;
704      case Instruction::INVOKE_STATIC:
705        invoke_type = kStatic;
706        is_range = false;
707        break;
708      case Instruction::INVOKE_STATIC_RANGE:
709        invoke_type = kStatic;
710        is_range = true;
711        break;
712      case Instruction::INVOKE_SUPER:
713        invoke_type = kSuper;
714        is_range = false;
715        break;
716      case Instruction::INVOKE_SUPER_RANGE:
717        invoke_type = kSuper;
718        is_range = true;
719        break;
720      case Instruction::INVOKE_VIRTUAL:
721        invoke_type = kVirtual;
722        is_range = false;
723        break;
724      case Instruction::INVOKE_VIRTUAL_RANGE:
725        invoke_type = kVirtual;
726        is_range = true;
727        break;
728      case Instruction::INVOKE_INTERFACE:
729        invoke_type = kInterface;
730        is_range = false;
731        break;
732      case Instruction::INVOKE_INTERFACE_RANGE:
733        invoke_type = kInterface;
734        is_range = true;
735        break;
736      default:
737        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
738        // Avoid used uninitialized warnings.
739        invoke_type = kDirect;
740        is_range = false;
741    }
742    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
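    // For illustration: an "invoke-virtual {v1}, meth@0042" call site decodes to kVirtual with
    // is_range == false above, so the method index is taken from VRegB_35c() (0x42 in that case).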
743
744  } else {
745    invoke_type = kStatic;
746    dex_file = called->GetDexFile();
747    dex_method_idx = called->GetDexMethodIndex();
748  }
749  uint32_t shorty_len;
750  const char* shorty =
751      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
752  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
753  visitor.VisitArguments();
754  self->EndAssertNoThreadSuspension(old_cause);
755  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
756  // Resolve method filling in dex cache.
757  if (UNLIKELY(called->IsRuntimeMethod())) {
758    StackHandleScope<1> hs(self);
759    mirror::Object* dummy = nullptr;
760    HandleWrapper<mirror::Object> h_receiver(
761        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
762    called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type);
763  }
764  const void* code = NULL;
765  if (LIKELY(!self->IsExceptionPending())) {
766    // Incompatible class change should have been handled in resolve method.
767    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
768        << PrettyMethod(called) << " " << invoke_type;
769    if (virtual_or_interface) {
770      // Refine called method based on receiver.
771      CHECK(receiver != nullptr) << invoke_type;
772
773      mirror::ArtMethod* orig_called = called;
774      if (invoke_type == kVirtual) {
775        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
776      } else {
777        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
778      }
779
780      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
781                               << PrettyTypeOf(receiver) << " "
782                               << invoke_type << " " << orig_called->GetVtableIndex();
783
784      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
785      // of the sharpened method.
786      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
787        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
788      } else {
789        // Calling from one dex file to another, need to compute the method index appropriate to
790        // the caller's dex file. Since we get here only if the original called was a runtime
791        // method, we've got the correct dex_file and a dex_method_idx from above.
792        DCHECK_EQ(caller->GetDexFile(), dex_file);
793        StackHandleScope<1> hs(self);
794        MethodHelper mh(hs.NewHandle(called));
795        uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
796        if (method_index != DexFile::kDexNoIndex) {
797          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
798        }
799      }
800    }
801    // Ensure that the called method's class is initialized.
802    StackHandleScope<1> hs(soa.Self());
803    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
804    linker->EnsureInitialized(called_class, true, true);
805    if (LIKELY(called_class->IsInitialized())) {
806      code = called->GetEntryPointFromQuickCompiledCode();
807    } else if (called_class->IsInitializing()) {
808      if (invoke_type == kStatic) {
809        // Class is still initializing, go to oat and grab code (trampoline must be left in place
810        // until class is initialized to stop races between threads).
811        code = linker->GetQuickOatCodeFor(called);
812      } else {
813        // No trampoline for non-static methods.
814        code = called->GetEntryPointFromQuickCompiledCode();
815      }
816    } else {
817      DCHECK(called_class->IsErroneous());
818    }
819  }
820  CHECK_EQ(code == NULL, self->IsExceptionPending());
821  // Fix up any locally saved objects that may have moved during a GC.
822  visitor.FixupReferences();
823  // Place called method in callee-save frame to be placed as first argument to quick method.
824  sp->Assign(called);
825  return code;
826}
827
828
829
830/*
831 * This class uses a couple of observations to unite the different calling conventions through
832 * a few constants.
833 *
834 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
835 *    possible alignment.
836 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
837 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
838 *    when we have to split things.
839 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
840 *    and we can use Int handling directly.
841 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
842 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
843 *    extension should be compatible with Aarch64, which mandates copying the available bits
844 *    into LSB and leaving the rest unspecified.
845 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
846 *    the stack.
847 * 6) There is only little endian.
848 *
849 *
850 * Actual work is supposed to be done in a delegate of the template type. The interface is as
851 * follows:
852 *
853 * void PushGpr(uintptr_t):   Add a value for the next GPR
854 *
855 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Is only called if we need
856 *                            padding, that is, when the architecture is 32b and aligns 64b values.
857 *
858 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b; it's the delegate's job
859 *                            to split it if necessary. Any required alignment will already have
860 *                            been performed by the state machine.
861 *
862 * void PushStack(uintptr_t): Push a value to the stack.
863 *
864 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
865 *                                          called with nullptr, as null references may need
866 *                                          explicit initialization. Must return the jobject, i.e.
867 *                                          the pointer to the HandleScope entry (nullptr if null).
868 *
869 */
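// A minimal sketch of how the state machine is driven (illustration only; the real delegates are
// ComputeGenericJniFrameSize and BuildGenericJniFrameVisitor below):
//
//   struct CountingDelegate {  // hypothetical delegate that merely counts the pushes
//     size_t gprs = 0, fprs = 0, stack = 0, handles = 0;
//     void PushGpr(uintptr_t) { ++gprs; }
//     void PushFpr4(float) { ++fprs; }
//     void PushFpr8(uint64_t) { ++fprs; }
//     void PushStack(uintptr_t) { ++stack; }
//     uintptr_t PushHandle(mirror::Object*) { ++handles; return 0u; }
//   };
//
//   CountingDelegate counter;
//   BuildGenericJniFrameStateMachine<CountingDelegate> sm(&counter);
//   sm.AdvancePointer(nullptr);          // JNIEnv* slot
//   sm.AdvanceHandleScope(receiver);     // 'this' (or the declaring class for static methods)
//   sm.AdvanceInt(0);                    // ...one Advance call per argument in the shorty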
870template <class T> class BuildGenericJniFrameStateMachine {
871 public:
872#if defined(__arm__)
873  // TODO: These are all dummy values!
874  static constexpr bool kNativeSoftFloatAbi = true;
875  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
876  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
877
878  static constexpr size_t kRegistersNeededForLong = 2;
879  static constexpr size_t kRegistersNeededForDouble = 2;
880  static constexpr bool kMultiRegistersAligned = true;
881  static constexpr bool kMultiRegistersWidened = false;
882  static constexpr bool kAlignLongOnStack = true;
883  static constexpr bool kAlignDoubleOnStack = true;
884#elif defined(__aarch64__)
885  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
886  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
887  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
888
889  static constexpr size_t kRegistersNeededForLong = 1;
890  static constexpr size_t kRegistersNeededForDouble = 1;
891  static constexpr bool kMultiRegistersAligned = false;
892  static constexpr bool kMultiRegistersWidened = false;
893  static constexpr bool kAlignLongOnStack = false;
894  static constexpr bool kAlignDoubleOnStack = false;
895#elif defined(__mips__)
896  // TODO: These are all dummy values!
897  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
898  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
899  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
900
901  static constexpr size_t kRegistersNeededForLong = 2;
902  static constexpr size_t kRegistersNeededForDouble = 2;
903  static constexpr bool kMultiRegistersAligned = true;
904  static constexpr bool kMultiRegistersWidened = true;
905  static constexpr bool kAlignLongOnStack = false;
906  static constexpr bool kAlignDoubleOnStack = false;
907#elif defined(__i386__)
908  // TODO: Check these!
909  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
910  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
911  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
912
913  static constexpr size_t kRegistersNeededForLong = 2;
914  static constexpr size_t kRegistersNeededForDouble = 2;
915  static constexpr bool kMultiRegistersAligned = false;       // x86 does not pass args in registers anyway.
916  static constexpr bool kMultiRegistersWidened = false;
917  static constexpr bool kAlignLongOnStack = false;
918  static constexpr bool kAlignDoubleOnStack = false;
919#elif defined(__x86_64__)
920  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
921  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
922  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
923
924  static constexpr size_t kRegistersNeededForLong = 1;
925  static constexpr size_t kRegistersNeededForDouble = 1;
926  static constexpr bool kMultiRegistersAligned = false;
927  static constexpr bool kMultiRegistersWidened = false;
928  static constexpr bool kAlignLongOnStack = false;
929  static constexpr bool kAlignDoubleOnStack = false;
930#else
931#error "Unsupported architecture"
932#endif
933
934 public:
935  explicit BuildGenericJniFrameStateMachine(T* delegate) : gpr_index_(kNumNativeGprArgs),
936                                                           fpr_index_(kNumNativeFprArgs),
937                                                           stack_entries_(0),
938                                                           delegate_(delegate) {
939    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
940    // the next register is even; counting down is just to make the compiler happy...
941    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
942    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
943  }
944
945  virtual ~BuildGenericJniFrameStateMachine() {}
946
947  bool HavePointerGpr() {
948    return gpr_index_ > 0;
949  }
950
951  void AdvancePointer(void* val) {
952    if (HavePointerGpr()) {
953      gpr_index_--;
954      PushGpr(reinterpret_cast<uintptr_t>(val));
955    } else {
956      stack_entries_++;         // TODO: have a field for pointer length as multiple of 32b
957      PushStack(reinterpret_cast<uintptr_t>(val));
958      gpr_index_ = 0;
959    }
960  }
961
962
963  bool HaveHandleScopeGpr() {
964    return gpr_index_ > 0;
965  }
966
967  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
968    uintptr_t handle = PushHandle(ptr);
969    if (HaveHandleScopeGpr()) {
970      gpr_index_--;
971      PushGpr(handle);
972    } else {
973      stack_entries_++;
974      PushStack(handle);
975      gpr_index_ = 0;
976    }
977  }
978
979
980  bool HaveIntGpr() {
981    return gpr_index_ > 0;
982  }
983
984  void AdvanceInt(uint32_t val) {
985    if (HaveIntGpr()) {
986      gpr_index_--;
987      PushGpr(val);
988    } else {
989      stack_entries_++;
990      PushStack(val);
991      gpr_index_ = 0;
992    }
993  }
994
995
996  bool HaveLongGpr() {
997    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
998  }
999
1000  bool LongGprNeedsPadding() {
1001    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
1002        kAlignLongOnStack &&                  // and when it needs alignment
1003        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
1004  }
1005
1006  bool LongStackNeedsPadding() {
1007    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
1008        kAlignLongOnStack &&                  // and when it needs 8B alignment
1009        (stack_entries_ & 1) == 1;            // counter is odd
1010  }
1011
1012  void AdvanceLong(uint64_t val) {
1013    if (HaveLongGpr()) {
1014      if (LongGprNeedsPadding()) {
1015        PushGpr(0);
1016        gpr_index_--;
1017      }
1018      if (kRegistersNeededForLong == 1) {
1019        PushGpr(static_cast<uintptr_t>(val));
1020      } else {
1021        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1022        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1023      }
1024      gpr_index_ -= kRegistersNeededForLong;
1025    } else {
1026      if (LongStackNeedsPadding()) {
1027        PushStack(0);
1028        stack_entries_++;
1029      }
1030      if (kRegistersNeededForLong == 1) {
1031        PushStack(static_cast<uintptr_t>(val));
1032        stack_entries_++;
1033      } else {
1034        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1035        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1036        stack_entries_ += 2;
1037      }
1038      gpr_index_ = 0;
1039    }
1040  }
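  // Worked example (ARM, kRegistersNeededForLong == 2, kAlignLongOnStack): for a native method
  // taking (jint, jint, jint, jlong), the JNIEnv*, the receiver handle and the first two ints use
  // r0-r3, the third int becomes stack entry 0, and the long then sees an odd stack_entries_, so
  // LongStackNeedsPadding() inserts one padding word before its two words (four entries total).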
1041
1042
1043  bool HaveFloatFpr() {
1044    return fpr_index_ > 0;
1045  }
1046
1047  template <typename U, typename V> V convert(U in) {
1048    CHECK_LE(sizeof(U), sizeof(V));
1049    union { U u; V v; } tmp;
1050    tmp.u = in;
1051    return tmp.v;
1052  }
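  // E.g. convert<float, uint32_t>(1.0f) yields 0x3f800000: the bits are copied unchanged through
  // the union, which is how AdvanceFloat() below hands floats to integer registers on soft-float
  // targets.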
1053
1054  void AdvanceFloat(float val) {
1055    if (kNativeSoftFloatAbi) {
1056      AdvanceInt(convert<float, uint32_t>(val));
1057    } else {
1058      if (HaveFloatFpr()) {
1059        fpr_index_--;
1060        if (kRegistersNeededForDouble == 1) {
1061          if (kMultiRegistersWidened) {
1062            PushFpr8(convert<double, uint64_t>(val));
1063          } else {
1064            // No widening, just use the bits.
1065            PushFpr8(convert<float, uint64_t>(val));
1066          }
1067        } else {
1068          PushFpr4(val);
1069        }
1070      } else {
1071        stack_entries_++;
1072        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
1073          // Need to widen before storing: Note the "double" in the template instantiation.
1074          PushStack(convert<double, uintptr_t>(val));
1075        } else {
1076          PushStack(convert<float, uintptr_t>(val));
1077        }
1078        fpr_index_ = 0;
1079      }
1080    }
1081  }
1082
1083
1084  bool HaveDoubleFpr() {
1085    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1086  }
1087
1088  bool DoubleFprNeedsPadding() {
1089    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
1090        kAlignDoubleOnStack &&                  // and when it needs alignment
1091        (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
1092  }
1093
1094  bool DoubleStackNeedsPadding() {
1095    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
1096        kAlignDoubleOnStack &&                  // and when it needs 8B alignment
1097        (stack_entries_ & 1) == 1;              // counter is odd
1098  }
1099
1100  void AdvanceDouble(uint64_t val) {
1101    if (kNativeSoftFloatAbi) {
1102      AdvanceLong(val);
1103    } else {
1104      if (HaveDoubleFpr()) {
1105        if (DoubleFprNeedsPadding()) {
1106          PushFpr4(0);
1107          fpr_index_--;
1108        }
1109        PushFpr8(val);
1110        fpr_index_ -= kRegistersNeededForDouble;
1111      } else {
1112        if (DoubleStackNeedsPadding()) {
1113          PushStack(0);
1114          stack_entries_++;
1115        }
1116        if (kRegistersNeededForDouble == 1) {
1117          PushStack(static_cast<uintptr_t>(val));
1118          stack_entries_++;
1119        } else {
1120          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1121          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1122          stack_entries_ += 2;
1123        }
1124        fpr_index_ = 0;
1125      }
1126    }
1127  }
1128
1129  uint32_t getStackEntries() {
1130    return stack_entries_;
1131  }
1132
1133  uint32_t getNumberOfUsedGprs() {
1134    return kNumNativeGprArgs - gpr_index_;
1135  }
1136
1137  uint32_t getNumberOfUsedFprs() {
1138    return kNumNativeFprArgs - fpr_index_;
1139  }
1140
1141 private:
1142  void PushGpr(uintptr_t val) {
1143    delegate_->PushGpr(val);
1144  }
1145  void PushFpr4(float val) {
1146    delegate_->PushFpr4(val);
1147  }
1148  void PushFpr8(uint64_t val) {
1149    delegate_->PushFpr8(val);
1150  }
1151  void PushStack(uintptr_t val) {
1152    delegate_->PushStack(val);
1153  }
1154  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1155    return delegate_->PushHandle(ref);
1156  }
1157
1158  uint32_t gpr_index_;      // Number of free GPRs
1159  uint32_t fpr_index_;      // Number of free FPRs
1160  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
1161                            // extended
1162  T* delegate_;             // What Push implementation gets called
1163};
1164
1165class ComputeGenericJniFrameSize FINAL {
1166 public:
1167  ComputeGenericJniFrameSize() : num_handle_scope_references_(0), num_stack_entries_(0) {}
1168
1169  uint32_t GetStackSize() {
1170    return num_stack_entries_ * sizeof(uintptr_t);
1171  }
1172
1173  // WARNING: After this, *sp won't be pointing to the method anymore!
1174  void ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
1175                     uint32_t shorty_len, void* sp, HandleScope** table,
1176                     uint32_t* handle_scope_entries, uintptr_t** start_stack, uintptr_t** start_gpr,
1177                     uint32_t** start_fpr, void** code_return, size_t* overall_size)
1178      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1179    ComputeAll(is_static, shorty, shorty_len);
1180
1181    mirror::ArtMethod* method = (*m)->AsMirrorPtr();
1182
1183    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
1184
1185    // First, fix up the layout of the callee-save frame.
1186    // We have to squeeze in the HandleScope, and relocate the method pointer.
1187
1188    // "Free" the slot for the method.
1189    sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.
1190
1191    // Under the callee saves put handle scope and new method stack reference.
1192    *handle_scope_entries = num_handle_scope_references_;
1193
1194    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
1195    size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
1196
1197    sp8 -= scope_and_method;
1198    // Align by kStackAlignment.
1199    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1200
1201    uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
1202    *table = reinterpret_cast<HandleScope*>(sp8_table);
1203    (*table)->SetNumberOfReferences(num_handle_scope_references_);
1204
1205    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
1206    uint8_t* method_pointer = sp8;
1207    StackReference<mirror::ArtMethod>* new_method_ref =
1208        reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
1209    new_method_ref->Assign(method);
1210    *m = new_method_ref;
1211
1212    // Reference cookie and padding
1213    sp8 -= 8;
1214    // Store HandleScope size
1215    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(handle_scope_size & 0xFFFFFFFF);
1216
1217    // Next comes the native call stack.
1218    sp8 -= GetStackSize();
1219    // Align by kStackAlignment.
1220    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1221    *start_stack = reinterpret_cast<uintptr_t*>(sp8);
1222
1223    // Put FPRs and GPRs below.
1224    // The assumption of uintptr_t-sized slots is OK right now, as the 32-bit target (ARM) is soft-float.
1225    size_t fregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeFprArgs;
1226    sp8 -= fregs * sizeof(uintptr_t);
1227    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
1228    size_t iregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeGprArgs;
1229    sp8 -= iregs * sizeof(uintptr_t);
1230    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
1231
1232    // reserve space for the code pointer
1233    sp8 -= kPointerSize;
1234    *code_return = reinterpret_cast<void*>(sp8);
1235
1236    *overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;
1237
1238    // The new SP is stored at the end of the alloca, so it can be immediately popped
1239    sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
1240    *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
1241  }
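  // Sketch of what ComputeLayout carves out below the callee-save frame (higher to lower
  // addresses; exact sizes depend on the architecture):
  //   | HandleScope (handle_scope_entries refs)   | <- *table
  //   | new StackReference<ArtMethod>             | <- *m now points here
  //   | reference cookie / padding (8 bytes)      |
  //   | native call stack arguments               | <- *start_stack
  //   | native FPR argument area                  | <- *start_fpr
  //   | native GPR argument area                  | <- *start_gpr
  //   | code pointer slot                         | <- *code_return
  // The address of the new method reference is also stored at sp - 5 * KB so the stub can pop it
  // as the new SP.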
1242
1243  void ComputeHandleScopeOffset() { }  // nothing to do, static right now
1244
1245  void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
1246      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1247    BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize> sm(this);
1248
1249    // JNIEnv
1250    sm.AdvancePointer(nullptr);
1251
1252    // Class object or this as first argument
1253    sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1254
1255    for (uint32_t i = 1; i < shorty_len; ++i) {
1256      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
1257      switch (cur_type_) {
1258        case Primitive::kPrimNot:
1259          sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1260          break;
1261
1262        case Primitive::kPrimBoolean:
1263        case Primitive::kPrimByte:
1264        case Primitive::kPrimChar:
1265        case Primitive::kPrimShort:
1266        case Primitive::kPrimInt:
1267          sm.AdvanceInt(0);
1268          break;
1269        case Primitive::kPrimFloat:
1270          sm.AdvanceFloat(0);
1271          break;
1272        case Primitive::kPrimDouble:
1273          sm.AdvanceDouble(0);
1274          break;
1275        case Primitive::kPrimLong:
1276          sm.AdvanceLong(0);
1277          break;
1278        default:
1279          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1280      }
1281    }
1282
1283    num_stack_entries_ = sm.getStackEntries();
1284  }
1285
1286  void PushGpr(uintptr_t /* val */) {
1287    // not optimizing registers, yet
1288  }
1289
1290  void PushFpr4(float /* val */) {
1291    // not optimizing registers, yet
1292  }
1293
1294  void PushFpr8(uint64_t /* val */) {
1295    // not optimizing registers, yet
1296  }
1297
1298  void PushStack(uintptr_t /* val */) {
1299    // counting is already done in the superclass
1300  }
1301
1302  uintptr_t PushHandle(mirror::Object* /* ptr */) {
1303    num_handle_scope_references_++;
1304    return reinterpret_cast<uintptr_t>(nullptr);
1305  }
1306
1307 private:
1308  uint32_t num_handle_scope_references_;
1309  uint32_t num_stack_entries_;
1310};
1311
1312// Visits arguments on the stack placing them into a region lower down the stack for the benefit
1313// of transitioning into native code.
1314class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1315 public:
1316  BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
1317                              const char* shorty, uint32_t shorty_len, Thread* self) :
1318      QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
1319    ComputeGenericJniFrameSize fsc;
1320    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
1321                      &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
1322                      &alloca_used_size_);
1323    handle_scope_number_of_references_ = 0;
1324    cur_hs_entry_ = GetFirstHandleScopeEntry();
1325
1326    // The JNI environment is always the first argument.
1327    sm_.AdvancePointer(self->GetJniEnv());
1328
1329    if (is_static) {
1330      sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
1331    }
1332  }
1333
1334  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
1335
1336  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1337
1338  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
1339      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1340    return handle_scope_->GetHandle(0).GetReference();
1341  }
1342
1343  jobject GetFirstHandleScopeJObject()
1344      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1345    return handle_scope_->GetHandle(0).ToJObject();
1346  }
1347
1348  void PushGpr(uintptr_t val) {
1349    *cur_gpr_reg_ = val;
1350    cur_gpr_reg_++;
1351  }
1352
1353  void PushFpr4(float val) {
1354    *cur_fpr_reg_ = val;
1355    cur_fpr_reg_++;
1356  }
1357
1358  void PushFpr8(uint64_t val) {
1359    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1360    *tmp = val;
1361    cur_fpr_reg_ += 2;
1362  }
1363
1364  void PushStack(uintptr_t val) {
1365    *cur_stack_arg_ = val;
1366    cur_stack_arg_++;
1367  }
1368
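  // Pushes |ref| into the handle scope. The returned value is what the native code will receive as
  // a jobject: null for a null reference, otherwise the address of the handle scope entry that now
  // holds the reference.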
1369  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1370    uintptr_t tmp;
1371    if (ref == nullptr) {
1372      *cur_hs_entry_ = StackReference<mirror::Object>();
1373      tmp = reinterpret_cast<uintptr_t>(nullptr);
1374    } else {
1375      *cur_hs_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
1376      tmp = reinterpret_cast<uintptr_t>(cur_hs_entry_);
1377    }
1378    cur_hs_entry_++;
1379    handle_scope_number_of_references_++;
1380    return tmp;
1381  }
1382
1383  // Size of the part of the alloca that we actually need.
1384  size_t GetAllocaUsedSize() {
1385    return alloca_used_size_;
1386  }
1387
1388  void* GetCodeReturn() {
1389    return code_return_;
1390  }
1391
1392 private:
1393  uint32_t handle_scope_number_of_references_;
1394  StackReference<mirror::Object>* cur_hs_entry_;
1395  HandleScope* handle_scope_;
1396  uint32_t handle_scope_expected_refs_;
1397  uintptr_t* cur_gpr_reg_;
1398  uint32_t* cur_fpr_reg_;
1399  uintptr_t* cur_stack_arg_;
1400  // StackReference<mirror::Object>* top_of_handle_scope_;
1401  void* code_return_;
1402  size_t alloca_used_size_;
1403
1404  BuildGenericJniFrameStateMachine<BuildGenericJniFrameVisitor> sm_;
1405
1406  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1407};
1408
1409void BuildGenericJniFrameVisitor::Visit() {
1410  Primitive::Type type = GetParamPrimitiveType();
1411  switch (type) {
1412    case Primitive::kPrimLong: {
1413      jlong long_arg;
1414      if (IsSplitLongOrDouble()) {
1415        long_arg = ReadSplitLongParam();
1416      } else {
1417        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1418      }
1419      sm_.AdvanceLong(long_arg);
1420      break;
1421    }
1422    case Primitive::kPrimDouble: {
1423      uint64_t double_arg;
1424      if (IsSplitLongOrDouble()) {
1425        // Read into a uint64_t so that we don't cast to a double.
1426        double_arg = ReadSplitLongParam();
1427      } else {
1428        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1429      }
1430      sm_.AdvanceDouble(double_arg);
1431      break;
1432    }
1433    case Primitive::kPrimNot: {
1434      StackReference<mirror::Object>* stack_ref =
1435          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1436      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1437      break;
1438    }
1439    case Primitive::kPrimFloat:
1440      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1441      break;
1442    case Primitive::kPrimBoolean:  // Fall-through.
1443    case Primitive::kPrimByte:     // Fall-through.
1444    case Primitive::kPrimChar:     // Fall-through.
1445    case Primitive::kPrimShort:    // Fall-through.
1446    case Primitive::kPrimInt:      // Fall-through.
1447      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1448      break;
1449    case Primitive::kPrimVoid:
1450      LOG(FATAL) << "UNREACHABLE";
1451      break;
1452  }
1453}
1454
1455void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1456  // Initialize padding entries.
1457  while (handle_scope_number_of_references_ < handle_scope_expected_refs_) {
1458    *cur_hs_entry_ = StackReference<mirror::Object>();
1459    cur_hs_entry_++;
1460    handle_scope_number_of_references_++;
1461  }
1462  handle_scope_->SetNumberOfReferences(handle_scope_expected_refs_);
1463  DCHECK_NE(handle_scope_expected_refs_, 0U);
1464  // Install HandleScope.
1465  self->PushHandleScope(handle_scope_);
1466}
1467
1468extern "C" void* artFindNativeMethod();
1469
1470uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1471  if (lock != nullptr) {
1472    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1473  } else {
1474    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1475  }
1476}
1477
1478void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1479  if (lock != nullptr) {
1480    JniMethodEndSynchronized(cookie, lock, self);
1481  } else {
1482    JniMethodEnd(cookie, self);
1483  }
1484}
1485
1486/*
1487 * Initializes an alloca region assumed to be directly below sp for a native call:
1488 * Creates a HandleScope and a native call stack, and fills a mini stack with values to be
1489 * pushed to registers. The final element on the stack is a pointer to the native code.
1490 *
1491 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1492 * We need to fix this up, as the HandleScope needs to go into the callee-save frame.
1493 *
1494 * The return value of this function denotes:
1495 * 1) How many bytes of the alloca can be released, if the value is non-negative.
1496 * 2) An error, if the value is negative.
1497 */
1498extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, StackReference<mirror::ArtMethod>* sp)
1499    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1500  mirror::ArtMethod* called = sp->AsMirrorPtr();
1501  DCHECK(called->IsNative()) << PrettyMethod(called, true);
1502
1503  // run the visitor
1504  uint32_t shorty_len = 0;
1505  const char* shorty = called->GetShorty(&shorty_len);
1506  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self);
1507  visitor.VisitArguments();
1508  visitor.FinalizeHandleScope(self);
1509
1510  // Fix up managed-stack bookkeeping in Thread.
1511  self->SetTopOfStack(sp, 0);
1512
1513  self->VerifyStack();
1514
1515  // Start JNI, save the cookie.
1516  uint32_t cookie;
1517  if (called->IsSynchronized()) {
1518    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
1519    if (self->IsExceptionPending()) {
1520      self->PopHandleScope();
1521      // A negative value denotes an error.
1522      return -1;
1523    }
1524  } else {
1525    cookie = JniMethodStart(self);
1526  }
1527  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1528  *(sp32 - 1) = cookie;
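  // Note: the cookie lives in the 32-bit slot directly below the managed frame's Method* reference,
  // which is where artQuickGenericJniEndTrampoline below reads it back from.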
1529
1530  // Retrieve the stored native code.
1531  const void* nativeCode = called->GetNativeMethod();
1532
1533  // There are two cases for the content of nativeCode:
1534  // 1) Pointer to the native function.
1535  // 2) Pointer to the trampoline for native code binding.
1536  // In the second case, we need to execute the binding and continue with the actual native function
1537  // pointer.
1538  DCHECK(nativeCode != nullptr);
1539  if (nativeCode == GetJniDlsymLookupStub()) {
1540    nativeCode = artFindNativeMethod();
1541
1542    if (nativeCode == nullptr) {
1543      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
1544
1545      // End JNI, as the assembly will move to deliver the exception.
1546      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
1547      if (shorty[0] == 'L') {
1548        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1549      } else {
1550        artQuickGenericJniEndJNINonRef(self, cookie, lock);
1551      }
1552
1553      return -1;
1554    }
1555    // Note that the native code pointer will be automatically set by artFindNativeMethod().
1556  }
1557
1558  // Store the native code pointer in the stack at the right location.
1559  uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
1560  *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);
1561
1562  // Of the 5K reserved, only window_size plus the frame pointer slot are used; release the rest.
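  // Illustrative arithmetic (with a hypothetical window_size of 112 bytes and 4-byte pointers):
  // 5 * 1024 - 112 - 4 == 5004 bytes of the alloca could be released.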
1563  size_t window_size = visitor.GetAllocaUsedSize();
1564  return (5 * KB) - window_size - kPointerSize;
1565}
1566
1567/*
1568 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
1569 * unlocking.
1570 */
1571extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
1572                                                    StackReference<mirror::ArtMethod>* sp,
1573                                                    jvalue result, uint64_t result_f)
1574    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1575  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1576  mirror::ArtMethod* called = sp->AsMirrorPtr();
1577  uint32_t cookie = *(sp32 - 1);
1578
1579  jobject lock = nullptr;
1580  if (called->IsSynchronized()) {
1581    HandleScope* table = reinterpret_cast<HandleScope*>(
1582        reinterpret_cast<uint8_t*>(sp) + sizeof(StackReference<mirror::ArtMethod>));
1583    lock = table->GetHandle(0).ToJObject();
1584  }
1585
1586  char return_shorty_char = called->GetShorty()[0];
1587
1588  if (return_shorty_char == 'L') {
1589    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
1590  } else {
1591    artQuickGenericJniEndJNINonRef(self, cookie, lock);
1592
1593    switch (return_shorty_char) {
1594      case 'F':  // Fall-through.
1595      case 'D':
1596        return result_f;
1597      case 'Z':
1598        return result.z;
1599      case 'B':
1600        return result.b;
1601      case 'C':
1602        return result.c;
1603      case 'S':
1604        return result.s;
1605      case 'I':
1606        return result.i;
1607      case 'J':
1608        return result.j;
1609      case 'V':
1610        return 0;
1611      default:
1612        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
1613        return 0;
1614    }
1615  }
1616}
1617
1618// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
1619// for the method pointer.
1620//
1621// It is valid to use this, as at the usage points here (returns from C functions) we are assumed
1622// to hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
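//
// Illustrative sketch only (the real packing is done by GetTwoWordSuccessValue elsewhere): on a
// 32-bit target the two words could be combined as
//   uint64_t two_words = (static_cast<uint64_t>(code) << 32) | method;
// so the assembly stub branches to the high word after picking up the method from the low word.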
1623
1624template<InvokeType type, bool access_check>
1625static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1626                                     mirror::ArtMethod* caller_method,
1627                                     Thread* self, StackReference<mirror::ArtMethod>* sp);
1628
1629template<InvokeType type, bool access_check>
1630static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1631                                     mirror::ArtMethod* caller_method,
1632                                     Thread* self, StackReference<mirror::ArtMethod>* sp) {
1633  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
1634                                             type);
1635  if (UNLIKELY(method == nullptr)) {
1636    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1637    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1638    uint32_t shorty_len;
1639    const char* shorty =
1640        dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
1641    {
1642      // Remember the args in case a GC happens in FindMethodFromCode.
1643      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1644      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
1645      visitor.VisitArguments();
1646      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
1647                                                      self);
1648      visitor.FixupReferences();
1649    }
1650
1651    if (UNLIKELY(method == nullptr)) {
1652      CHECK(self->IsExceptionPending());
1653      return GetTwoWordFailureValue();  // Failure.
1654    }
1655  }
1656  DCHECK(!self->IsExceptionPending());
1657  const void* code = method->GetEntryPointFromQuickCompiledCode();
1658
1659  // When we return, the caller will branch to this address, so it had better not be 0!
1660  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
1661      << method->GetDexFile()->GetLocation();
1662
1663  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1664                                reinterpret_cast<uintptr_t>(method));
1665}
1666
1667// Explicit artInvokeCommon template function declarations to please analysis tool.
1668#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
1669  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
1670  TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
1671                                                    mirror::Object* this_object,                \
1672                                                    mirror::ArtMethod* caller_method,           \
1673                                                    Thread* self,                               \
1674                                                    StackReference<mirror::ArtMethod>* sp)      \
1675
1676EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
1677EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
1678EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
1679EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
1680EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
1681EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
1682EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
1683EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
1684EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
1685EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
1686#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
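
// For reference, the first use of the macro above expands (with the trailing semicolon from the
// use site) to an explicit instantiation of artInvokeCommon:
//   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
//   TwoWordReturn artInvokeCommon<kVirtual, false>(uint32_t method_idx,
//                                                  mirror::Object* this_object,
//                                                  mirror::ArtMethod* caller_method,
//                                                  Thread* self,
//                                                  StackReference<mirror::ArtMethod>* sp);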
1687
1688
1689// See comments in runtime_support_asm.S
1690extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
1691    mirror::Object* this_object,
1692    mirror::ArtMethod* caller_method,
1693    Thread* self,
1694    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1695  return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
1696}
1697
1698
1699extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
1700    mirror::Object* this_object,
1701    mirror::ArtMethod* caller_method,
1702    Thread* self,
1703    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1704  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
1705}
1706
1707extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
1708    mirror::Object* this_object,
1709    mirror::ArtMethod* caller_method,
1710    Thread* self,
1711    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1712  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
1713}
1714
1715extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
1716    mirror::Object* this_object,
1717    mirror::ArtMethod* caller_method,
1718    Thread* self,
1719    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1720  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
1721}
1722
1723extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
1724    mirror::Object* this_object,
1725    mirror::ArtMethod* caller_method,
1726    Thread* self,
1727    StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1728  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
1729}
1730
1731// Determine target of interface dispatch. This object is known non-null.
1732extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
1733                                                      mirror::Object* this_object,
1734                                                      mirror::ArtMethod* caller_method,
1735                                                      Thread* self,
1736                                                      StackReference<mirror::ArtMethod>* sp)
1737    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1738  mirror::ArtMethod* method;
1739  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
1740    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
1741    if (UNLIKELY(method == nullptr)) {
1742      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1743      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
1744                                                                 caller_method);
1745      return GetTwoWordFailureValue();  // Failure.
1746    }
1747  } else {
1748    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1749    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
1750
1751    // Find the caller PC.
1752    constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
1753    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset);
1754
1755    // Map the caller PC to a dex PC.
1756    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
1757    const DexFile::CodeItem* code = caller_method->GetCodeItem();
1758    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
1759    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
1760    Instruction::Code instr_code = instr->Opcode();
1761    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
1762          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
1763        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
1764    uint32_t dex_method_idx;
1765    if (instr_code == Instruction::INVOKE_INTERFACE) {
1766      dex_method_idx = instr->VRegB_35c();
1767    } else {
1768      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
1769      dex_method_idx = instr->VRegB_3rc();
1770    }
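    // Illustrative example (hypothetical bytecode): for "invoke-interface {v1, v2}, LIface;->f(I)V"
    // the decoded opcode is INVOKE_INTERFACE and VRegB_35c() yields the method index of f, which is
    // then resolved below via FindMethodFromCode.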
1771
1772    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1773    uint32_t shorty_len;
1774    const char* shorty =
1775        dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
1776    {
1777      // Remember the args in case a GC happens in FindMethodFromCode.
1778      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1779      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
1780      visitor.VisitArguments();
1781      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
1782                                                     self);
1783      visitor.FixupReferences();
1784    }
1785
1786    if (UNLIKELY(method == nullptr)) {
1787      CHECK(self->IsExceptionPending());
1788      return GetTwoWordFailureValue();  // Failure.
1789    }
1790  }
1791  const void* code = method->GetEntryPointFromQuickCompiledCode();
1792
1793  // When we return, the caller will branch to this address, so it had better not be 0!
1794  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
1795      << method->GetDexFile()->GetLocation();
1796
1797  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1798                                reinterpret_cast<uintptr_t>(method));
1799}
1800
1801}  // namespace art
1802