quick_trampoline_entrypoints.cc revision 700a402244a1a423da4f3ba8032459f4b65fa18f
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 48;  // Frame size.
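  // Consistency check on the constants above: Gpr1Offset 8 skips the Method*
  // slot (4) and the R0 padding (4); LrOffset 44 = 8 + 3 GPR args (12) +
  // 6 callee saves (24); the frame rounds out at FrameSize 48.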
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // |  :         |
  // | X19        |
  // | X7         |
  // |  :         |
  // | X1         |
  // | D15        |
  // |  :         |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 304;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 64;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 32;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 176;  // Frame size.
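  // The GPR args are not spilled in argument order: arg1 (RSI) sits above the
  // RBP/RBX callee saves (see the diagram above), so each argument index is
  // mapped to its spill slot explicitly.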
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
                       const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {
    DCHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
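    // The low half comes from the last argument register's spill slot (which
    // is what GetParamAddress() returns here), the high half from the first
    // stack slot of the caller's frame (stack_args_).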
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // This implementation doesn't support a reg-spill area for hard float
    // ABI targets such as x86_64 and aarch64. So, for targets where
    // 'kQuickSoftFloatAbi' is 'false':
    //     (a) 'stack_args_' should point to the method's first argument, and
    //     (b) whatever the argument type, 'stack_index_' should be advanced
    //         on every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle the "this" argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
        stack_index_++;
      }
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
            stack_index_++;
          }
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            }
            stack_index_++;
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else if (kQuickSoftFloatAbi) {
                  stack_index_++;
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                }
              }
            }
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
          + GetBytesPerGprSpillLocation(kRuntimeISA) /* ArtMethod* */;
    } else {
      // For now, there is no reg-spill area for the targets with a hard
      // float ABI. So, the offset pointing to the method's first parameter
      // ('this' for non-static methods) should be returned.
      return GetBytesPerGprSpillLocation(kRuntimeISA);  // Skip Method*.
    }
  }

  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;
  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  uint32_t fpr_index_;  // Index into spilled FPRs.
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
    MethodHelper mh(method);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
                                                  method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
                                                      mh.GetShortyLength(),
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}

// Visits arguments on the stack, placing them into the args vector; Object*
// arguments are converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place arguments into the args vector and remove the receiver.
  MethodHelper proxy_mh(proxy_method);
  DCHECK(!proxy_mh.IsStatic()) << PrettyMethod(proxy_method);
  std::vector<jvalue> args;
  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
                                              proxy_mh.GetShortyLength(), &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
                                               rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them
// in JNI local references so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}


// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                    mirror::Object* receiver,
                                                    Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  const DexFile* dex_file;
  uint32_t dex_method_idx;
  if (called->IsRuntimeMethod()) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    {
      MethodHelper mh(caller);
      dex_file = &mh.GetDexFile();
      code = mh.GetCodeItem();
    }
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
        // Avoid 'used uninitialized' warnings.
        invoke_type = kDirect;
        is_range = false;
    }
    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();

  } else {
    invoke_type = kStatic;
    dex_file = &MethodHelper(called).GetDexFile();
    dex_method_idx = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve the method, filling in the dex cache.
  if (called->IsRuntimeMethod()) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Object> handle_scope_receiver(hs.NewHandle(virtual_or_interface ? receiver : nullptr));
    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
    receiver = handle_scope_receiver.Get();
  }
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      mirror::ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
                               << PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(&MethodHelper(caller).GetDexFile() == dex_file);
        uint32_t method_index =
            MethodHelper(called).FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;
  return code;
}


/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, ARM's, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    is necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on ARM only, and it's the same in registers and on
 *    the stack.
 * 6) Only little-endian architectures are supported.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows (a worked example is given after this comment):
 *
 * void PushGpr(uintptr_t):   Add a value for the next GPR.
 *
 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Only called where this
 *                            makes sense, that is, on architectures whose FPR slots are 32b;
 *                            also used to push alignment padding.
 *
 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job to
 *                            split this if necessary. The current state will have aligned, if
 *                            necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
 *                                          called with nullptr, as null initialization might be
 *                                          important. Must return the jobject, that is, the
 *                                          reference to the entry in the HandleScope (nullptr if
 *                                          necessary).
 *
 */
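// A worked example (a sketch, not normative): on 32-bit ARM
// (kNumNativeGprArgs = 4), a non-static native method with shorty "VIJ"
// (void f(int, long)) drives the state machine as:
//   AdvancePointer(JNIEnv*)       -> PushGpr(env)              (3 GPRs left)
//   AdvanceHandleScope(receiver)  -> PushHandle(obj), PushGpr  (2 GPRs left)
//   AdvanceInt(i)                 -> PushGpr(i)                (1 GPR left)
//   AdvanceLong(j)                -> the single free GPR cannot hold a
//                                    2-register long, so both halves go to
//                                    the stack via PushStack.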
template <class T> class BuildGenericJniFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;       // x86 not using regs, anyways
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildGenericJniFrameStateMachine(T* delegate) : gpr_index_(kNumNativeGprArgs),
                                                           fpr_index_(kNumNativeFprArgs),
                                                           stack_entries_(0),
                                                           delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
    // the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildGenericJniFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;         // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }


  bool HaveHandleScopeGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }


  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }


  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs alignment
        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;            // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }


  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

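  // Reinterpret the bits of a U as a V via a union (when V is wider than U,
  // the upper bytes are left unspecified; callers only use the low bits).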
  template <typename U, typename V> V convert(U in) {
    CHECK_LE(sizeof(U), sizeof(V));
    union { U u; V v; } tmp;
    tmp.u = in;
    return tmp.v;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(convert<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(convert<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(convert<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          PushStack(convert<double, uintptr_t>(val));
        } else {
          PushStack(convert<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }


  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
        kAlignDoubleOnStack &&                  // and when it needs alignment
        (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
        kAlignDoubleOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;              // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* delegate_;             // What Push implementation gets called.
};

class ComputeGenericJniFrameSize FINAL {
 public:
  ComputeGenericJniFrameSize() : num_handle_scope_references_(0), num_stack_entries_(0) {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
                     void* sp, HandleScope** table, uint32_t* handle_scope_entries,
                     uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
                     void** code_return, size_t* overall_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ComputeAll(is_static, shorty, shorty_len);

    mirror::ArtMethod* method = **m;

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.
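    // The rewritten frame, growing down from the original sp (a sketch of the
    // steps below):
    //   [ HandleScope                ]
    //   [ Method* (relocated)        ]  <- new *m
    //   [ cookie + padding (8 bytes) ]
    //   [ native stack args          ]  (start re-aligned to 16 bytes)
    //   [ FPR scratch area           ]
    //   [ GPR scratch area           ]
    //   [ code pointer slot          ]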

    // "Free" the slot for the method.
    sp8 += kPointerSize;

    // Add the HandleScope.
    *handle_scope_entries = num_handle_scope_references_;
    size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
    sp8 -= handle_scope_size;
    *table = reinterpret_cast<HandleScope*>(sp8);
    (*table)->SetNumberOfReferences(num_handle_scope_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    sp8 -= kPointerSize;
    uint8_t* method_pointer = sp8;
    *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
    *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);

    // Reference cookie and padding.
    sp8 -= 8;
    // Store HandleScope size.
    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(handle_scope_size & 0xFFFFFFFF);

    // Next comes the native call stack.
    sp8 -= GetStackSize();
    // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
    uintptr_t mask = ~0x0F;
    sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put FPRs and GPRs below.
    // Assumption is OK right now, as we have soft-float ARM.
    size_t fregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);

    // Reserve space for the code pointer.
    sp8 -= kPointerSize;
    *code_return = reinterpret_cast<void*>(sp8);

    *overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;

    // The new SP is stored at the end of the alloca, so it can be immediately popped.
    sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
    *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
  }

  void ComputeHandleScopeOffset() { }  // Nothing to do, static right now.

  void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize> sm(this);

    // JNIEnv
    sm.AdvancePointer(nullptr);

    // Class object or this as first argument
    sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr4(float /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr8(uint64_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushStack(uintptr_t /* val */) {
    // Counting is already done in the state machine.
  }

  uintptr_t PushHandle(mirror::Object* /* ptr */) {
    num_handle_scope_references_++;
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 private:
  uint32_t num_handle_scope_references_;
  uint32_t num_stack_entries_;
};
1303
1304// Visits arguments on the stack placing them into a region lower down the stack for the benefit
1305// of transitioning into native code.
1306class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1307 public:
1308  BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
1309                              uint32_t shorty_len, Thread* self) :
1310      QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
1311    ComputeGenericJniFrameSize fsc;
1312    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
1313                      &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
1314                      &alloca_used_size_);
1315    handle_scope_number_of_references_ = 0;
1316    cur_hs_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstHandleScopeEntry());
1317
1318    // The JNI environment is always the first argument.
1319    sm_.AdvancePointer(self->GetJniEnv());
1320
1321    if (is_static) {
1322      sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
1323    }
1324  }
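
  // The ordering above mirrors the JNI calling convention. For a hypothetical
  //   static native int add(int a, int b);
  // the native side is invoked as
  //   jint Java_pkg_Cls_add(JNIEnv* env, jclass clazz, jint a, jint b);
  // so the JNIEnv* is advanced first and, for static methods, the declaring
  // class takes the handle-scope slot that this would otherwise occupy.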
1325
1326  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
1327
1328  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1329
1330  jobject GetFirstHandleScopeEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1331    return handle_scope_->GetHandle(0).ToJObject();
1332  }
1333
1334  void PushGpr(uintptr_t val) {
1335    *cur_gpr_reg_ = val;
1336    cur_gpr_reg_++;
1337  }
1338
1339  void PushFpr4(float val) {
1340    *cur_fpr_reg_ = val;
1341    cur_fpr_reg_++;
1342  }
1343
1344  void PushFpr8(uint64_t val) {
1345    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1346    *tmp = val;
1347    cur_fpr_reg_ += 2;
1348  }
1349
1350  void PushStack(uintptr_t val) {
1351    *cur_stack_arg_ = val;
1352    cur_stack_arg_++;
1353  }
1354
1355  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1356    uintptr_t tmp;
1357    if (ref == nullptr) {
1358      *cur_hs_entry_ = StackReference<mirror::Object>();
1359      tmp = reinterpret_cast<uintptr_t>(nullptr);
1360    } else {
1361      *cur_hs_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
1362      tmp = reinterpret_cast<uintptr_t>(cur_hs_entry_);
1363    }
1364    cur_hs_entry_++;
1365    handle_scope_number_of_references_++;
1366    return tmp;
1367  }
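
  // What the native code ultimately receives for a reference argument is a
  // pointer to its handle-scope entry; only a null reference is passed
  // through as a plain null jobject. Illustratively, for a non-null obj:
  //   jobject arg = reinterpret_cast<jobject>(PushHandle(obj));      // &entry
  //   jobject nil = reinterpret_cast<jobject>(PushHandle(nullptr));  // null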
1368
1369  // Size of the part of the alloca that we actually need.
1370  size_t GetAllocaUsedSize() {
1371    return alloca_used_size_;
1372  }
1373
1374  void* GetCodeReturn() {
1375    return code_return_;
1376  }
1377
1378 private:
1379  uint32_t handle_scope_number_of_references_;
1380  StackReference<mirror::Object>* cur_hs_entry_;
1381  HandleScope* handle_scope_;
1382  uint32_t handle_scope_expected_refs_;
1383  uintptr_t* cur_gpr_reg_;
1384  uint32_t* cur_fpr_reg_;
1385  uintptr_t* cur_stack_arg_;
1387  void* code_return_;
1388  size_t alloca_used_size_;
1389
1390  BuildGenericJniFrameStateMachine<BuildGenericJniFrameVisitor> sm_;
1391
1392  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1393};
1394
1395void BuildGenericJniFrameVisitor::Visit() {
1396  Primitive::Type type = GetParamPrimitiveType();
1397  switch (type) {
1398    case Primitive::kPrimLong: {
1399      jlong long_arg;
1400      if (IsSplitLongOrDouble()) {
1401        long_arg = ReadSplitLongParam();
1402      } else {
1403        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1404      }
1405      sm_.AdvanceLong(long_arg);
1406      break;
1407    }
1408    case Primitive::kPrimDouble: {
1409      uint64_t double_arg;
1410      if (IsSplitLongOrDouble()) {
1411        // Read the raw bits so that we don't cast to a double.
1412        double_arg = ReadSplitLongParam();
1413      } else {
1414        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1415      }
1416      sm_.AdvanceDouble(double_arg);
1417      break;
1418    }
1419    case Primitive::kPrimNot: {
1420      StackReference<mirror::Object>* stack_ref =
1421          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1422      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1423      break;
1424    }
1425    case Primitive::kPrimFloat:
1426      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1427      break;
1428    case Primitive::kPrimBoolean:  // Fall-through.
1429    case Primitive::kPrimByte:     // Fall-through.
1430    case Primitive::kPrimChar:     // Fall-through.
1431    case Primitive::kPrimShort:    // Fall-through.
1432    case Primitive::kPrimInt:      // Fall-through.
1433      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1434      break;
1435    case Primitive::kPrimVoid:
1436      LOG(FATAL) << "UNREACHABLE";
1437      break;
1438  }
1439}
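
// On 32-bit ABIs a long or double argument can be split between the last
// argument GPR and the first out slot of the caller's frame.
// IsSplitLongOrDouble() and ReadSplitLongParam() (from QuickArgumentVisitor)
// reassemble the two halves before the value is advanced, roughly:
//   uint64_t lo = low_half_from_last_gpr;  // low 32 bits
//   uint64_t hi = high_half_from_stack;    // high 32 bits
//   uint64_t value = (hi << 32) | lo;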
1440
1441void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1442  // Initialize padding entries.
1443  while (handle_scope_number_of_references_ < handle_scope_expected_refs_) {
1444    *cur_hs_entry_ = StackReference<mirror::Object>();
1445    cur_hs_entry_++;
1446    handle_scope_number_of_references_++;
1447  }
1448  handle_scope_->SetNumberOfReferences(handle_scope_expected_refs_);
1449  DCHECK_NE(handle_scope_expected_refs_, 0U);
1450  // Install HandleScope.
1451  self->PushHandleScope(handle_scope_);
1452}
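
// Once pushed, the handle scope is linked into the Thread's handle-scope
// chain, so the GC can visit (and update) the references it holds while the
// native code runs.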
1453
1454extern "C" void* artFindNativeMethod();
1455
1456uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1457  if (lock != nullptr) {
1458    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1459  } else {
1460    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1461  }
1462}
1463
1464void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1465  if (lock != nullptr) {
1466    JniMethodEndSynchronized(cookie, lock, self);
1467  } else {
1468    JniMethodEnd(cookie, self);
1469  }
1470}
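
// Together with the JniMethodStart* entrypoints, these helpers bracket the
// native call: the start routine returns a cookie (saved local-reference
// state) that the matching end routine consumes. Schematically:
//   uint32_t cookie = JniMethodStart(self);  // transition into native
//   /* native code runs */
//   JniMethodEnd(cookie, self);              // transition back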
1471
1472/*
1473 * Initializes an alloca region assumed to be directly below sp for a native call:
1474 * Creates a HandleScope and a call stack, and fills a mini-stack with values to be pushed
1475 * to registers. The final element on the stack is a pointer to the native code.
1476 *
1477 * On entry, the stack has a standard callee-save frame above sp and an alloca below it.
1478 * We need to fix this up, as the handle scope has to go into the callee-save frame.
1479 *
1480 * The return value of this function denotes:
1481 * 1) How many bytes of the alloca can be released, if the value is non-negative.
1482 * 2) An error, if the value is negative.
1483 */
1484extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
1485    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1486  mirror::ArtMethod* called = *sp;
1487  DCHECK(called->IsNative()) << PrettyMethod(called, true);
1488
1489  // Run the visitor to build the generic JNI frame.
1490  MethodHelper mh(called);
1491
1492  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
1493                                      self);
1494  visitor.VisitArguments();
1495  visitor.FinalizeHandleScope(self);
1496
1497  // Fix up the managed-stack bookkeeping in the Thread.
1498  self->SetTopOfStack(sp, 0);
1499
1500  self->VerifyStack();
1501
1502  // Start JNI, save the cookie.
1503  uint32_t cookie;
1504  if (called->IsSynchronized()) {
1505    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeEntry(), self);
1506    if (self->IsExceptionPending()) {
1507      self->PopHandleScope();
1508      // A negative value denotes an error.
1509      return -1;
1510    }
1511  } else {
1512    cookie = JniMethodStart(self);
1513  }
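
  // The cookie is stashed one 32-bit slot below the method pointer, in the
  // top of the alloca region; artQuickGenericJniEndTrampoline reads it back
  // from the same slot.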
1514  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1515  *(sp32 - 1) = cookie;
1516
1517  // Retrieve the stored native code.
1518  const void* nativeCode = called->GetNativeMethod();
1519
1520  // There are two cases for the content of nativeCode:
1521  // 1) Pointer to the native function.
1522  // 2) Pointer to the trampoline for native code binding.
1523  // In the second case, we need to execute the binding and continue with the actual native function
1524  // pointer.
1525  DCHECK(nativeCode != nullptr);
1526  if (nativeCode == GetJniDlsymLookupStub()) {
1527    nativeCode = artFindNativeMethod();
1528
1529    if (nativeCode == nullptr) {
1530      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
1531
1532      // End JNI, as the assembly will move to deliver the exception.
1533      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeEntry() : nullptr;
1534      if (mh.GetShorty()[0] == 'L') {
1535        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1536      } else {
1537        artQuickGenericJniEndJNINonRef(self, cookie, lock);
1538      }
1539
1540      return -1;
1541    }
1542    // Note that the native code pointer will be automatically set by artFindNativeMethod().
1543  }
1544
1545  // Store the native code pointer on the stack at the right location.
1546  uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
1547  *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);
1548
1549  // 5KB were reserved; window_size bytes plus one pointer (the saved SP) remain in use.
1550  size_t window_size = visitor.GetAllocaUsedSize();
1551  return (5 * KB) - window_size - kPointerSize;
1552}
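
// A sketch of how the per-architecture assembly stub is expected to consume
// this return value (names here are illustrative only):
//   ssize_t released = artQuickGenericJniTrampoline(self, sp);
//   if (released < 0) goto deliver_exception;
//   // Pop the saved SP from the bottom of the alloca, advance by `released`,
//   // and the stored native code pointer is next on the stack.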
1553
1554/*
1555 * Called after the native JNI code returns. Responsible for cleanup (handle scope, saved state) and
1556 * unlocking.
1557 */
1558extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
1559                                                    jvalue result, uint64_t result_f)
1560    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1561  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1562  mirror::ArtMethod* called = *sp;
1563  uint32_t cookie = *(sp32 - 1);
1564
1565  jobject lock = nullptr;
1566  if (called->IsSynchronized()) {
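    // The handle scope sits just above the method pointer in the frame (see
    // ComputeLayout), and its first entry holds the lock object: the class
    // for static methods, this otherwise.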
1567    HandleScope* table = reinterpret_cast<HandleScope*>(
1568        reinterpret_cast<uint8_t*>(sp) + kPointerSize);
1569    lock = table->GetHandle(0).ToJObject();
1570  }
1571
1572  MethodHelper mh(called);
1573  char return_shorty_char = mh.GetShorty()[0];
1574
1575  if (return_shorty_char == 'L') {
1576    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
1577  } else {
1578    artQuickGenericJniEndJNINonRef(self, cookie, lock);
1579
1580    switch (return_shorty_char) {
1581      case 'F':  // Fall-through.
1582      case 'D':
1583        return result_f;
1584      case 'Z':
1585        return result.z;
1586      case 'B':
1587        return result.b;
1588      case 'C':
1589        return result.c;
1590      case 'S':
1591        return result.s;
1592      case 'I':
1593        return result.i;
1594      case 'J':
1595        return result.j;
1596      case 'V':
1597        return 0;
1598      default:
1599        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
1600        return 0;
1601    }
1602  }
1603}
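
// All results travel back to the stub widened to 64 bits: integral results
// are widened from the matching jvalue field, while 'F'/'D' results arrive
// pre-packed in result_f, since they are returned in floating-point
// registers. For example, for shorty 'Z':
//   jvalue result;
//   result.z = JNI_TRUE;
//   uint64_t packed = result.z;  // == 1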
1604
1605template<InvokeType type, bool access_check>
1606static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1607                                mirror::ArtMethod* caller_method,
1608                                Thread* self, mirror::ArtMethod** sp);
1609
1610template<InvokeType type, bool access_check>
1611static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1612                                mirror::ArtMethod* caller_method,
1613                                Thread* self, mirror::ArtMethod** sp) {
1614  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
1615                                             type);
1616  if (UNLIKELY(method == nullptr)) {
1617    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1618    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1619    uint32_t shorty_len;
1620    const char* shorty =
1621        dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
1622    {
1623      // Remember the args in case a GC happens in FindMethodFromCode.
1624      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1625      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
1626      visitor.VisitArguments();
1627      method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
1628      visitor.FixupReferences();
1629    }
1630
1631    if (UNLIKELY(method == nullptr)) {
1632      CHECK(self->IsExceptionPending());
1633      return 0;  // Failure.
1634    }
1635  }
1636  DCHECK(!self->IsExceptionPending());
1637  const void* code = method->GetEntryPointFromQuickCompiledCode();
1638
1639  // When we return, the caller will branch to this address, so it had better not be 0!
1640  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
1641      << MethodHelper(method).GetDexFile().GetLocation();
1642#ifdef __LP64__
1643  UNIMPLEMENTED(FATAL);
1644  return 0;
1645#else
1646  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
1647  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
1648  uint64_t result = ((code_uint << 32) | method_uint);
1649  return result;
1650#endif
1651}
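
// A sketch of how a 32-bit caller stub can split the packed pair again; the
// method pointer rides in the low half and the code pointer in the high half
// (under the ARM AAPCS, a uint64_t comes back in r0/r1):
//   uint64_t packed = ...;  // from artInvokeCommon
//   mirror::ArtMethod* m =
//       reinterpret_cast<mirror::ArtMethod*>(static_cast<uint32_t>(packed));
//   const void* entry =
//       reinterpret_cast<const void*>(static_cast<uint32_t>(packed >> 32));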
1652
1653// Explicit artInvokeCommon template function declarations to please the analysis tool.
1654#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
1655  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
1656  uint64_t artInvokeCommon<type, access_check>(uint32_t method_idx,                             \
1657                                               mirror::Object* this_object,                     \
1658                                               mirror::ArtMethod* caller_method,                \
1659                                               Thread* self, mirror::ArtMethod** sp)            \
1660
1661EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
1662EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
1663EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
1664EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
1665EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
1666EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
1667EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
1668EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
1669EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
1670EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
1671#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
1672
1673
1674// See comments in runtime_support_asm.S
1675extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
1676                                                                mirror::Object* this_object,
1677                                                                mirror::ArtMethod* caller_method,
1678                                                                Thread* self,
1679                                                                mirror::ArtMethod** sp)
1680    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1681  return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
1682}
1683
1684
1685extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
1686                                                             mirror::Object* this_object,
1687                                                             mirror::ArtMethod* caller_method,
1688                                                             Thread* self,
1689                                                             mirror::ArtMethod** sp)
1690    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1691  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
1692}
1693
1694extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
1695                                                             mirror::Object* this_object,
1696                                                             mirror::ArtMethod* caller_method,
1697                                                             Thread* self,
1698                                                             mirror::ArtMethod** sp)
1699    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1700  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
1701}
1702
1703extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
1704                                                            mirror::Object* this_object,
1705                                                            mirror::ArtMethod* caller_method,
1706                                                            Thread* self,
1707                                                            mirror::ArtMethod** sp)
1708    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1709  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
1710}
1711
1712extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
1713                                                              mirror::Object* this_object,
1714                                                              mirror::ArtMethod* caller_method,
1715                                                              Thread* self,
1716                                                              mirror::ArtMethod** sp)
1717    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1718  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
1719}
1720
1721// Determine the target of interface dispatch. this_object is known to be non-null.
1722extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
1723                                                 mirror::Object* this_object,
1724                                                 mirror::ArtMethod* caller_method,
1725                                                 Thread* self, mirror::ArtMethod** sp)
1726    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1727  mirror::ArtMethod* method;
1728  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
1729    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
1730    if (UNLIKELY(method == nullptr)) {
1731      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1732      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
1733                                                                 caller_method);
1734      return 0;  // Failure.
1735    }
1736  } else {
1737    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1738    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
1739    // Determine the method index from the calling dex instruction.
1740#if defined(__arm__)
1741    // On entry the stack pointed to by sp is:
1742    // | argN       |  |
1743    // | ...        |  |
1744    // | arg4       |  |
1745    // | arg3 spill |  |  Caller's frame
1746    // | arg2 spill |  |
1747    // | arg1 spill |  |
1748    // | Method*    | ---
1749    // | LR         |
1750    // | ...        |    callee saves
1751    // | R3         |    arg3
1752    // | R2         |    arg2
1753    // | R1         |    arg1
1754    // | R0         |
1755    // | Method*    |  <- sp
1756    DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
1757    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
1758    uintptr_t caller_pc = regs[10];
1759#elif defined(__i386__)
1760    // On entry the stack pointed to by sp is:
1761    // | argN        |  |
1762    // | ...         |  |
1763    // | arg4        |  |
1764    // | arg3 spill  |  |  Caller's frame
1765    // | arg2 spill  |  |
1766    // | arg1 spill  |  |
1767    // | Method*     | ---
1768    // | Return      |
1769    // | EBP,ESI,EDI |    callee saves
1770    // | EBX         |    arg3
1771    // | EDX         |    arg2
1772    // | ECX         |    arg1
1773    // | EAX/Method* |  <- sp
1774    DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
1775    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
1776    uintptr_t caller_pc = regs[7];
1777#elif defined(__mips__)
1778    // On entry the stack pointed to by sp is:
1779    // | argN       |  |
1780    // | ...        |  |
1781    // | arg4       |  |
1782    // | arg3 spill |  |  Caller's frame
1783    // | arg2 spill |  |
1784    // | arg1 spill |  |
1785    // | Method*    | ---
1786    // | RA         |
1787    // | ...        |    callee saves
1788    // | A3         |    arg3
1789    // | A2         |    arg2
1790    // | A1         |    arg1
1791    // | A0/Method* |  <- sp
1792    DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
1793    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
1794    uintptr_t caller_pc = regs[15];
1795#else
1796    UNIMPLEMENTED(FATAL);
1797    uintptr_t caller_pc = 0;
1798#endif
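    // Sanity check for the ARM case above: regs starts at sp + kPointerSize
    // (skipping the Method*), so regs[10] is at byte offset 4 + 10 * 4 = 44,
    // which is exactly the LR slot of the 48-byte kRefsAndArgs frame.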
1799    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
1800    const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
1801    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
1802    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
1803    Instruction::Code instr_code = instr->Opcode();
1804    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
1805          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
1806        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
1807    uint32_t dex_method_idx;
1808    if (instr_code == Instruction::INVOKE_INTERFACE) {
1809      dex_method_idx = instr->VRegB_35c();
1810    } else {
1811      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
1812      dex_method_idx = instr->VRegB_3rc();
1813    }
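
    // Illustration: for a call site such as
    //   invoke-interface {v1}, Lcom/example/I;->f()V
    // the 35c encoding carries the method index in its B operand, so
    // VRegB_35c() recovers it; the /range form stores it the same way and is
    // read via VRegB_3rc().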
1814
1815    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1816    uint32_t shorty_len;
1817    const char* shorty =
1818        dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
1819    {
1820      // Remember the args in case a GC happens in FindMethodFromCode.
1821      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1822      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
1823      visitor.VisitArguments();
1824      method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method,
1825                                                     self);
1826      visitor.FixupReferences();
1827    }
1828
1829    if (UNLIKELY(method == nullptr)) {
1830      CHECK(self->IsExceptionPending());
1831      return 0;  // Failure.
1832    }
1833  }
1834  const void* code = method->GetEntryPointFromQuickCompiledCode();
1835
1836  // When we return, the caller will branch to this address, so it had better not be 0!
1837  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
1838      << MethodHelper(method).GetDexFile().GetLocation();
1839#ifdef __LP64__
1840  UNIMPLEMENTED(FATAL);
1841  return 0;
1842#else
1843  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
1844  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
1845  uint64_t result = ((code_uint << 32) | method_uint);
1846  return result;
1847#endif
1848}
1849
1850}  // namespace art
1851