quick_trampoline_entrypoints.cc revision 5cb328362a633302ca0fcdbaa0da7d94069df051
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 48;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // |  :         |
  // | X19        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D15        |
  // |  :         |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 304;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 64;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 32;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 176;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * kBytesPerGprSpillLocation);
      case 1: return (1 * kBytesPerGprSpillLocation);
      case 2: return (0 * kBytesPerGprSpillLocation);
      case 3: return (5 * kBytesPerGprSpillLocation);
      case 4: return (6 * kBytesPerGprSpillLocation);
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
  }

  // For the given quick ref-and-args frame, return the caller's PC.
  static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
                       const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {
    DCHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * kBytesPerFprSpillLocation);
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((kBytesPerGprSpillLocation == 4) || (kBytesPerFprSpillLocation == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPRs and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }
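
  // Illustrative note: on ARM above (three 4-byte GPR arg slots), a long whose low word lands
  // in the last GPR slot (gpr_index_ == 2, i.e. R3) is "split": GetParamAddress() yields the R3
  // spill slot holding the low word, while the high word is the caller's first out slot, read
  // through stack_args_ as above.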

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // This implementation doesn't support a reg-spill area for hard float
    // ABI targets such as x86_64 and aarch64. So, for those targets, where
    // 'kQuickSoftFloatAbi' is 'false':
    //     (a) 'stack_args_' should point to the method's first argument, and
    //     (b) whatever the argument type, 'stack_index_' should be moved
    //         forward on every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle the "this" receiver argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
        stack_index_++;
      }
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
            stack_index_++;
          }
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            }
            stack_index_++;
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (kBytesPerGprSpillLocation == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (kBytesPerGprSpillLocation == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else if (kQuickSoftFloatAbi) {
                  stack_index_++;
                }
              }
            }
          } else {
            is_split_long_or_double_ = (kBytesPerFprSpillLocation == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (kBytesPerFprSpillLocation == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                }
              }
            }
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }
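
  // Example walk (illustrative, using the ARM soft-float layout above): for a virtual method
  // with shorty "VJI", the receiver takes R1 (gpr_index_ 0 -> 1); the long takes the R2/R3 pair
  // (gpr_index_ advances twice and the value is not split, as both words fit in registers); the
  // trailing int then sees gpr_index_ == kNumQuickGprArgs and lands in the first stack slot.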

 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * kBytesPerGprSpillLocation) + kBytesPerGprSpillLocation /* ArtMethod* */;
    } else {
      // For now, there is no reg-spill area for the targets with a hard float
      // ABI, so return the offset of the method's first parameter ('this' for
      // non-static methods).
      return kBytesPerGprSpillLocation;  // Skip Method*.
    }
  }
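
  // For instance, on ARM above: 3 GPR arg slots * 4 bytes, plus 4 bytes for the caller's
  // Method* slot, gives 16; the first stack-only argument thus sits 16 bytes above the top of
  // the callee save frame.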

  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;
  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  uint32_t fpr_index_;  // Index into spilled FPRs.
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Visits arguments on the stack, placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      // A long or double occupies two vregs; the second increment happens below the switch.
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
    MethodHelper mh(method);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
                                                  method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
                                                      mh.GetShortyLength(),
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
      // Ensure static method's class is initialized.
      SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}
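
// For example (illustrative): a method with registers_size_ == 5 and ins_size_ == 2 keeps its
// locals in v0..v2 and receives its two arguments in v3 and v4, so first_arg_reg == 3 above.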

// Visits arguments on the stack, placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create a local ref. copy of the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  MethodHelper proxy_mh(proxy_method);
  DCHECK(!proxy_mh.IsStatic()) << PrettyMethod(proxy_method);
  std::vector<jvalue> args;
  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
                                              proxy_mh.GetShortyLength(), &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
                                               rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                    mirror::Object* receiver,
                                                    Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  const DexFile* dex_file;
  uint32_t dex_method_idx;
  if (called->IsRuntimeMethod()) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    {
      MethodHelper mh(caller);
      dex_file = &mh.GetDexFile();
      code = mh.GetCodeItem();
    }
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
        // Avoid 'may be used uninitialized' warnings.
        invoke_type = kDirect;
        is_range = false;
    }
    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();

  } else {
    invoke_type = kStatic;
    dex_file = &MethodHelper(called).GetDexFile();
    dex_method_idx = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve the method, filling in the dex cache.
  if (called->IsRuntimeMethod()) {
    SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
    receiver = sirt_receiver.get();
  }
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }
      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(&MethodHelper(caller).GetDexFile() == dex_file);
        uint32_t method_index =
            MethodHelper(called).FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
    linker->EnsureInitialized(called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;
  return code;
}

/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float target, ARM, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code is
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into the LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on ARM only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t):   Add a value for the next GPR.
 *
 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Only called if we need
 *                            padding, that is, when the architecture is 32b and aligns 64b
 *                            values.
 *
 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b; it's the callee's job to
 *                            split this if necessary. The current state will have been aligned,
 *                            if necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ be called
 *                                          with nullptr, as this might be important for null
 *                                          initialization. Must return the jobject, that is,
 *                                          the reference to the entry in the Sirt (nullptr if
 *                                          necessary).
 *
 */
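
// A minimal sketch of a conforming delegate (illustrative only; the real delegates are
// ComputeGenericJniFrameSize and BuildGenericJniFrameVisitor below). It discards the values
// and merely counts what would be pushed:
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_++; }
//     uintptr_t PushSirt(mirror::Object*) { sirt_refs_++; return 0; }
//    private:
//     size_t gprs_, fprs_, stack_, sirt_refs_;  // Assume zero-initialized in a constructor.
//   };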
template <class T> class BuildGenericJniFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs (dummy).
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs (dummy).

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;       // x86 not using regs, anyways
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildGenericJniFrameStateMachine(T* delegate) : gpr_index_(kNumNativeGprArgs),
                                                           fpr_index_(kNumNativeFprArgs),
                                                           stack_entries_(0),
                                                           delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildGenericJniFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;         // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }


  bool HaveSirtGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t sirtRef = PushSirt(ptr);
    if (HaveSirtGpr()) {
      gpr_index_--;
      PushGpr(sirtRef);
    } else {
      stack_entries_++;
      PushStack(sirtRef);
      gpr_index_ = 0;
    }
  }


  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }


  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs alignment
        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;            // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }
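
  // Illustrative: on ARM (kRegistersNeededForLong == 2, kAlignLongOnStack == true), an even
  // number of free GPRs lets a long take the next register pair directly; with an odd count,
  // LongGprNeedsPadding() burns one register first so the pair stays even-aligned, mirroring
  // the AAPCS rule that 64-bit values go in even/odd register pairs.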


  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

  template <typename U, typename V> V convert(U in) {
    CHECK_LE(sizeof(U), sizeof(V));
    union { U u; V v; } tmp;
    tmp.u = in;
    return tmp.v;
  }
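
  // For example, convert<float, uint32_t>(1.0f) yields 0x3F800000, the raw IEEE-754 bit
  // pattern; copying through the union avoids a value-converting cast.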

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(convert<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(convert<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(convert<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          PushStack(convert<double, uintptr_t>(val));
        } else {
          PushStack(convert<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }


  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
        kAlignDoubleOnStack &&                  // and when it needs alignment
        (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
        kAlignDoubleOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;              // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushSirt(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* delegate_;             // What Push implementation gets called.
};

class ComputeGenericJniFrameSize FINAL {
 public:
  ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
                     void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
                     uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
                     void** code_return, size_t* overall_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ComputeAll(is_static, shorty, shorty_len);

    mirror::ArtMethod* method = **m;

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the Sirt, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += kPointerSize;

    // Add the Sirt.
    *sirt_entries = num_sirt_references_;
    size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
    sp8 -= sirt_size;
    *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
    (*table)->SetNumberOfReferences(num_sirt_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    sp8 -= kPointerSize;
    uint8_t* method_pointer = sp8;
    *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
    *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);

    // Reference cookie and padding.
    sp8 -= 8;
    // Store the Sirt size.
    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);

    // Next comes the native call stack.
    sp8 -= GetStackSize();
    // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
    uintptr_t mask = ~0x0F;
    sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put FPRs and GPRs below.
    // Assumption is OK right now, as we have soft-float ARM.
    size_t fregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);

    // Reserve space for the code pointer.
    sp8 -= kPointerSize;
    *code_return = reinterpret_cast<void*>(sp8);

    *overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;

    // The new SP is stored at the end of the alloca, so it can be immediately popped.
    sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
    *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
  }
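
  // Resulting layout (sketch), growing down from the original Method* slot:
  //   | Sirt (header + references) |
  //   | Method* (relocated)        |  <- *m
  //   | Sirt cookie + padding      |
  //   | native stack args          |  16-byte aligned
  //   | FPR arg array              |
  //   | GPR arg array              |
  //   | native code pointer        |  <- code_return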

  void ComputeSirtOffset() { }  // Nothing to do, static right now.

  void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize> sm(this);

    // JNIEnv.
    sm.AdvancePointer(nullptr);

    // Class object or this as first argument.
    sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr4(float /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr8(uint64_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushStack(uintptr_t /* val */) {
    // Counting is already done in the state machine.
  }

  uintptr_t PushSirt(mirror::Object* /* ptr */) {
    num_sirt_references_++;
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 private:
  uint32_t num_sirt_references_;
  uint32_t num_stack_entries_;
};
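
// Note the two-pass use of the state machine above: ComputeGenericJniFrameSize drives it once
// with dummy values to size the frame and Sirt, and BuildGenericJniFrameVisitor (below) drives
// it again to actually fill the registers, stack and Sirt.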
1292
1293// Visits arguments on the stack placing them into a region lower down the stack for the benefit
1294// of transitioning into native code.
1295class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1296 public:
1297  BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
1298                              uint32_t shorty_len, Thread* self) :
1299      QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
1300    ComputeGenericJniFrameSize fsc;
1301    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
1302                      &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
1303                      &alloca_used_size_);
1304    sirt_number_of_references_ = 0;
1305    cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());
1306
1307    // jni environment is always first argument
1308    sm_.AdvancePointer(self->GetJniEnv());
1309
1310    if (is_static) {
1311      sm_.AdvanceSirt((**sp)->GetDeclaringClass());
1312    }
1313  }
1314
1315  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
1316
1317  void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1318
1319  jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1320    return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
1321  }
1322
1323  void PushGpr(uintptr_t val) {
1324    *cur_gpr_reg_ = val;
1325    cur_gpr_reg_++;
1326  }
1327
1328  void PushFpr4(float val) {
1329    *cur_fpr_reg_ = val;
1330    cur_fpr_reg_++;
1331  }
1332
1333  void PushFpr8(uint64_t val) {
1334    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1335    *tmp = val;
1336    cur_fpr_reg_ += 2;
1337  }
1338
1339  void PushStack(uintptr_t val) {
1340    *cur_stack_arg_ = val;
1341    cur_stack_arg_++;
1342  }
1343
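  // Stores the reference into the next Sirt slot and returns what the native code will
  // see: the address of that slot for a non-null reference, or 0 for null, matching
  // JNI's convention that a null jobject denotes a null reference.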
1344  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1345    uintptr_t tmp;
1346    if (ref == nullptr) {
1347      *cur_sirt_entry_ = StackReference<mirror::Object>();
1348      tmp = reinterpret_cast<uintptr_t>(nullptr);
1349    } else {
1350      *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
1351      tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
1352    }
1353    cur_sirt_entry_++;
1354    sirt_number_of_references_++;
1355    return tmp;
1356  }
1357
1358  // Size of the part of the alloca that we actually need.
1359  size_t GetAllocaUsedSize() {
1360    return alloca_used_size_;
1361  }
1362
1363  void* GetCodeReturn() {
1364    return code_return_;
1365  }
1366
1367 private:
1368  uint32_t sirt_number_of_references_;
1369  StackReference<mirror::Object>* cur_sirt_entry_;
1370  StackIndirectReferenceTable* sirt_;
1371  uint32_t sirt_expected_refs_;
1372  uintptr_t* cur_gpr_reg_;
1373  uint32_t* cur_fpr_reg_;
1374  uintptr_t* cur_stack_arg_;
1376  void* code_return_;
1377  size_t alloca_used_size_;
1378
1379  BuildGenericJniFrameStateMachine<BuildGenericJniFrameVisitor> sm_;
1380
1381  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1382};
1383
1384void BuildGenericJniFrameVisitor::Visit() {
1385  Primitive::Type type = GetParamPrimitiveType();
1386  switch (type) {
1387    case Primitive::kPrimLong: {
1388      jlong long_arg;
1389      if (IsSplitLongOrDouble()) {
1390        long_arg = ReadSplitLongParam();
1391      } else {
1392        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1393      }
1394      sm_.AdvanceLong(long_arg);
1395      break;
1396    }
1397    case Primitive::kPrimDouble: {
1398      uint64_t double_arg;
1399      if (IsSplitLongOrDouble()) {
1400        // Read into a uint64_t so that we don't cast to a double.
1401        double_arg = ReadSplitLongParam();
1402      } else {
1403        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1404      }
1405      sm_.AdvanceDouble(double_arg);
1406      break;
1407    }
1408    case Primitive::kPrimNot: {
1409      StackReference<mirror::Object>* stack_ref =
1410          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1411      sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
1412      break;
1413    }
1414    case Primitive::kPrimFloat:
1415      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1416      break;
1417    case Primitive::kPrimBoolean:  // Fall-through.
1418    case Primitive::kPrimByte:     // Fall-through.
1419    case Primitive::kPrimChar:     // Fall-through.
1420    case Primitive::kPrimShort:    // Fall-through.
1421    case Primitive::kPrimInt:      // Fall-through.
1422      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1423      break;
1424    case Primitive::kPrimVoid:
1425      LOG(FATAL) << "UNREACHABLE";
1426      break;
1427  }
1428}
1429
1430void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
1431  // Initialize padding entries.
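  // A plausible reading (an assumption, not stated in the code): null-initializing the
  // unused slots keeps a GC that walks the Sirt from chasing stale stack data, which is
  // why the expected count rather than the actual count is installed below.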
1432  while (sirt_number_of_references_ < sirt_expected_refs_) {
1433    *cur_sirt_entry_ = StackReference<mirror::Object>();
1434    cur_sirt_entry_++;
1435    sirt_number_of_references_++;
1436  }
1437  sirt_->SetNumberOfReferences(sirt_expected_refs_);
1438  DCHECK_NE(sirt_expected_refs_, 0U);
1439  // Install Sirt.
1440  self->PushSirt(sirt_);
1441}
1442
1443extern "C" void* artFindNativeMethod();
1444
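// The two helpers below close out the JNI transition. A reference result has to be
// decoded from its local reference while the JNI state is still active, hence the
// separate paths for reference and non-reference return types.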
1445uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1446  if (lock != nullptr) {
1447    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1448  } else {
1449    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1450  }
1451}
1452
1453void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1454  if (lock != nullptr) {
1455    JniMethodEndSynchronized(cookie, lock, self);
1456  } else {
1457    JniMethodEnd(cookie, self);
1458  }
1459}
1460
1461/*
1462 * Initializes an alloca region assumed to be directly below sp for a native call:
1463 * creates a Sirt and a call stack, and fills a mini-stack with values to be pushed
1464 * into registers. The final element on the stack is a pointer to the native code.
1465 *
1466 * On entry, the stack has a standard callee-save frame above sp and an alloca below it.
1467 * We need to fix this up, as the Sirt needs to go into the callee-save frame.
1468 *
1469 * The return value of this function denotes:
1470 * 1) How many bytes of the alloca can be released, if the value is non-negative.
1471 * 2) An error, if the value is negative.
1472 */
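// Illustrative only, under the assumption (reflected in the computation at the end of
// this function) that the assembly stub reserved 5KB via alloca: a non-negative return
// value R tells the stub it may rewind its stack pointer by R bytes before calling
// through the native code pointer stored via GetCodeReturn().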
1473extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
1474    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1475  mirror::ArtMethod* called = *sp;
1476  DCHECK(called->IsNative()) << PrettyMethod(called, true);
1477
1478  // Run the argument visitor.
1479  MethodHelper mh(called);
1480
1481  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
1482                                      self);
1483  visitor.VisitArguments();
1484  visitor.FinalizeSirt(self);
1485
1486  // Fix up the managed-stack bookkeeping in the Thread.
1487  self->SetTopOfStack(sp, 0);
1488
1489  self->VerifyStack();
1490
1491  // Start JNI, save the cookie.
1492  uint32_t cookie;
1493  if (called->IsSynchronized()) {
1494    cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
1495    if (self->IsExceptionPending()) {
1496      self->PopSirt();
1497      // A negative value denotes an error.
1498      return -1;
1499    }
1500  } else {
1501    cookie = JniMethodStart(self);
1502  }
1503  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1504  *(sp32 - 1) = cookie;
1505
1506  // Retrieve the stored native code.
1507  const void* nativeCode = called->GetNativeMethod();
1508
1509  // There are two cases for the content of nativeCode:
1510  // 1) Pointer to the native function.
1511  // 2) Pointer to the trampoline for native code binding.
1512  // In the second case, we need to execute the binding and continue with the actual native function
1513  // pointer.
1514  DCHECK(nativeCode != nullptr);
1515  if (nativeCode == GetJniDlsymLookupStub()) {
1516    nativeCode = artFindNativeMethod();
1517
1518    if (nativeCode == nullptr) {
1519      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
1520
1521      // End JNI, as the assembly will move to deliver the exception.
1522      jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
1523      if (mh.GetShorty()[0] == 'L') {
1524        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1525      } else {
1526        artQuickGenericJniEndJNINonRef(self, cookie, lock);
1527      }
1528
1529      return -1;
1530    }
1531    // Note that the native code pointer will be automatically set by artFindNativeMethod().
1532  }
1533
1534  // Store the native code pointer in the stack at the right location.
1535  uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
1536  *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);
1537
1538  // 5K reserved, window_size + frame pointer used.
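  // Worked example with hypothetical numbers: on a 32-bit target (kPointerSize == 4)
  // with window_size == 200, this returns 5120 - 200 - 4 == 4916 releasable bytes.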
1539  size_t window_size = visitor.GetAllocaUsedSize();
1540  return (5 * KB) - window_size - kPointerSize;
1541}
1542
1543/*
1544 * Called after the native JNI code returns. Responsible for cleanup (SIRT, saved
1545 * state) and unlocking.
1546 */
1547extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
1548                                                    jvalue result, uint64_t result_f)
1549    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1550  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1551  mirror::ArtMethod* called = *sp;
1552  uint32_t cookie = *(sp32 - 1);
1553
1554  jobject lock = nullptr;
1555  if (called->IsSynchronized()) {
1556    StackIndirectReferenceTable* table =
1557        reinterpret_cast<StackIndirectReferenceTable*>(
1558            reinterpret_cast<uint8_t*>(sp) + kPointerSize);
1559    lock = reinterpret_cast<jobject>(table->GetStackReference(0));
1560  }
1561
1562  MethodHelper mh(called);
1563  char return_shorty_char = mh.GetShorty()[0];
1564
1565  if (return_shorty_char == 'L') {
1566    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
1567  } else {
1568    artQuickGenericJniEndJNINonRef(self, cookie, lock);
1569
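    // Floating-point results arrive as raw bits in result_f; returning them unchanged
    // avoids any int/float conversion on the way back to the stub.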
1570    switch (return_shorty_char) {
1571      case 'F':  // Fall-through.
1572      case 'D':
1573        return result_f;
1574      case 'Z':
1575        return result.z;
1576      case 'B':
1577        return result.b;
1578      case 'C':
1579        return result.c;
1580      case 'S':
1581        return result.s;
1582      case 'I':
1583        return result.i;
1584      case 'J':
1585        return result.j;
1586      case 'V':
1587        return 0;
1588      default:
1589        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
1590        return 0;
1591    }
1592  }
1593}
1594
1595template<InvokeType type, bool access_check>
1596static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1597                                mirror::ArtMethod* caller_method,
1598                                Thread* self, mirror::ArtMethod** sp);
1599
1600template<InvokeType type, bool access_check>
1601static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1602                                mirror::ArtMethod* caller_method,
1603                                Thread* self, mirror::ArtMethod** sp) {
1604  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
1605                                             type);
1606  if (UNLIKELY(method == nullptr)) {
1607    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1608    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1609    uint32_t shorty_len;
1610    const char* shorty =
1611        dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
1612    {
1613      // Remember the args in case a GC happens in FindMethodFromCode.
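      // If a moving GC relocates the objects, the stacked references would be stale;
      // FixupReferences() below writes the possibly-updated pointers back.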
1614      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1615      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
1616      visitor.VisitArguments();
1617      method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
1618      visitor.FixupReferences();
1619    }
1620
1621    if (UNLIKELY(method == nullptr)) {
1622      CHECK(self->IsExceptionPending());
1623      return 0;  // Failure.
1624    }
1625  }
1626  DCHECK(!self->IsExceptionPending());
1627  const void* code = method->GetEntryPointFromQuickCompiledCode();
1628
1629  // When we return, the caller will branch to this address, so it had better not be 0!
1630  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
1631      << MethodHelper(method).GetDexFile().GetLocation();
1632#ifdef __LP64__
1633  UNIMPLEMENTED(FATAL);
1634  return 0;
1635#else
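  // On 32-bit targets both pointers fit into 32 bits, so they are packed into the
  // single uint64_t return value: code in the high word, method in the low word. For
  // example (hypothetical addresses), code == 0x40001000 and method == 0x12345000 pack
  // to 0x4000100012345000, which the assembly stub splits back into two registers.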
1636  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
1637  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
1638  uint64_t result = ((code_uint << 32) | method_uint);
1639  return result;
1640#endif
1641}
1642
1643// Explicit artInvokeCommon template function declarations to please analysis tool.
1644#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
1645  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
1646  uint64_t artInvokeCommon<type, access_check>(uint32_t method_idx,                             \
1647                                               mirror::Object* this_object,                     \
1648                                               mirror::ArtMethod* caller_method,                \
1649                                               Thread* self, mirror::ArtMethod** sp)            \
1650
1651EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
1652EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
1653EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
1654EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
1655EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
1656EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
1657EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
1658EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
1659EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
1660EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
1661#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
1662
1663
1664// See comments in runtime_support_asm.S
1665extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
1666                                                                mirror::Object* this_object,
1667                                                                mirror::ArtMethod* caller_method,
1668                                                                Thread* self,
1669                                                                mirror::ArtMethod** sp)
1670    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1671  return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
1672}
1673
1674
1675extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
1676                                                             mirror::Object* this_object,
1677                                                             mirror::ArtMethod* caller_method,
1678                                                             Thread* self,
1679                                                             mirror::ArtMethod** sp)
1680    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1681  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
1682}
1683
1684extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
1685                                                             mirror::Object* this_object,
1686                                                             mirror::ArtMethod* caller_method,
1687                                                             Thread* self,
1688                                                             mirror::ArtMethod** sp)
1689    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1690  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
1691}
1692
1693extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
1694                                                            mirror::Object* this_object,
1695                                                            mirror::ArtMethod* caller_method,
1696                                                            Thread* self,
1697                                                            mirror::ArtMethod** sp)
1698    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1699  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
1700}
1701
1702extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
1703                                                              mirror::Object* this_object,
1704                                                              mirror::ArtMethod* caller_method,
1705                                                              Thread* self,
1706                                                              mirror::ArtMethod** sp)
1707    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1708  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
1709}
1710
1711// Determines the target of an interface dispatch. this_object is known to be non-null.
1712extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
1713                                                 mirror::Object* this_object,
1714                                                 mirror::ArtMethod* caller_method,
1715                                                 Thread* self, mirror::ArtMethod** sp)
1716    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1717  mirror::ArtMethod* method;
1718  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
1719    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
1720    if (UNLIKELY(method == nullptr)) {
1721      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1722      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
1723                                                                 caller_method);
1724      return 0;  // Failure.
1725    }
1726  } else {
1727    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
1728    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
1729    // Determine the method index from the calling dex instruction.
1730#if defined(__arm__)
1731    // On entry the stack pointed to by sp is:
1732    // | argN       |  |
1733    // | ...        |  |
1734    // | arg4       |  |
1735    // | arg3 spill |  |  Caller's frame
1736    // | arg2 spill |  |
1737    // | arg1 spill |  |
1738    // | Method*    | ---
1739    // | LR         |
1740    // | ...        |    callee saves
1741    // | R3         |    arg3
1742    // | R2         |    arg2
1743    // | R1         |    arg1
1744    // | R0         |
1745    // | Method*    |  <- sp
1746    DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
1747    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
1748    uintptr_t caller_pc = regs[10];
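    // Index check: regs starts at sp + 4 (skipping the Method* slot) and LR sits at
    // frame offset 44, so the return address is regs[(44 - 4) / 4] == regs[10].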
1749#elif defined(__i386__)
1750    // On entry the stack pointed to by sp is:
1751    // | argN        |  |
1752    // | ...         |  |
1753    // | arg4        |  |
1754    // | arg3 spill  |  |  Caller's frame
1755    // | arg2 spill  |  |
1756    // | arg1 spill  |  |
1757    // | Method*     | ---
1758    // | Return      |
1759    // | EBP,ESI,EDI |    callee saves
1760    // | EBX         |    arg3
1761    // | EDX         |    arg2
1762    // | ECX         |    arg1
1763    // | EAX/Method* |  <- sp
1764    DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
1765    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
1766    uintptr_t caller_pc = regs[7];
1767#elif defined(__mips__)
1768    // On entry the stack pointed to by sp is:
1769    // | argN       |  |
1770    // | ...        |  |
1771    // | arg4       |  |
1772    // | arg3 spill |  |  Caller's frame
1773    // | arg2 spill |  |
1774    // | arg1 spill |  |
1775    // | Method*    | ---
1776    // | RA         |
1777    // | ...        |    callee saves
1778    // | A3         |    arg3
1779    // | A2         |    arg2
1780    // | A1         |    arg1
1781    // | A0/Method* |  <- sp
1782    DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
1783    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
1784    uintptr_t caller_pc = regs[15];
1785#else
1786    UNIMPLEMENTED(FATAL);
1787    uintptr_t caller_pc = 0;
1788#endif
1789    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
1790    const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
1791    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
1792    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
1793    Instruction::Code instr_code = instr->Opcode();
1794    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
1795          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
1796        << "Unexpected call into interface trampoline: " << instr->DumpString(NULL);
1797    uint32_t dex_method_idx;
1798    if (instr_code == Instruction::INVOKE_INTERFACE) {
1799      dex_method_idx = instr->VRegB_35c();
1800    } else {
1801      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
1802      dex_method_idx = instr->VRegB_3rc();
1803    }
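    // For reference: invoke-interface uses dex format 35c and invoke-interface/range
    // uses format 3rc; in both encodings the B field carries the method index, which is
    // exactly what VRegB_35c()/VRegB_3rc() extract.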
1804
1805    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1806    uint32_t shorty_len;
1807    const char* shorty =
1808        dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
1809    {
1810      // Remember the args in case a GC happens in FindMethodFromCode.
1811      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1812      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
1813      visitor.VisitArguments();
1814      method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method,
1815                                                     self);
1816      visitor.FixupReferences();
1817    }
1818
1819    if (UNLIKELY(method == nullptr)) {
1820      CHECK(self->IsExceptionPending());
1821      return 0;  // Failure.
1822    }
1823  }
1824  const void* code = method->GetEntryPointFromQuickCompiledCode();
1825
1826  // When we return, the caller will branch to this address, so it had better not be 0!
1827  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
1828      << MethodHelper(method).GetDexFile().GetLocation();
1829#ifdef __LP64__
1830  UNIMPLEMENTED(FATAL);
1831  return 0;
1832#else
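  // Same 32-bit packing scheme as in artInvokeCommon above; see the example there.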
1833  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
1834  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
1835  uint64_t result = ((code_uint << 32) | method_uint);
1836  return result;
1837#endif
1838}
1839
1840}  // namespace art
1841