quick_trampoline_entrypoints.cc revision c6d86725521841637bdd9564e71be3d9691db20f
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "callee_save_frame.h"
18#include "common_throws.h"
19#include "dex_file-inl.h"
20#include "dex_instruction-inl.h"
21#include "entrypoints/entrypoint_utils-inl.h"
22#include "entrypoints/runtime_asm_entrypoints.h"
23#include "gc/accounting/card_table-inl.h"
24#include "interpreter/interpreter.h"
25#include "method_reference.h"
26#include "mirror/art_method-inl.h"
27#include "mirror/class-inl.h"
28#include "mirror/dex_cache-inl.h"
29#include "mirror/object-inl.h"
30#include "mirror/object_array-inl.h"
31#include "runtime.h"
32#include "scoped_thread_state_change.h"
33
34namespace art {
35
36// Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
37class QuickArgumentVisitor {
38  // Number of bytes for each out register in the caller method's frame.
39  static constexpr size_t kBytesStackArgLocation = 4;
40  // Frame size in bytes of a callee-save frame for RefsAndArgs.
41  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
42      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
43#if defined(__arm__)
44  // The callee save frame is pointed to by SP.
45  // | argN       |  |
46  // | ...        |  |
47  // | arg4       |  |
48  // | arg3 spill |  |  Caller's frame
49  // | arg2 spill |  |
50  // | arg1 spill |  |
51  // | Method*    | ---
52  // | LR         |
53  // | ...        |    4x6 bytes callee saves
54  // | R3         |
55  // | R2         |
56  // | R1         |
57  // | S15        |
58  // | :          |
59  // | S0         |
60  // |            |    4x2 bytes padding
61  // | Method*    |  <- sp
62  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
63  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
64  static constexpr size_t kNumQuickGprArgs = 3;
65  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
66  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
67      arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
68  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
69      arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
70  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
71      arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
72  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
73    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
74  }
75#elif defined(__aarch64__)
76  // The callee save frame is pointed to by SP.
77  // | argN       |  |
78  // | ...        |  |
79  // | arg4       |  |
80  // | arg3 spill |  |  Caller's frame
81  // | arg2 spill |  |
82  // | arg1 spill |  |
83  // | Method*    | ---
84  // | LR         |
85  // | X29        |
86  // |  :         |
87  // | X20        |
88  // | X7         |
89  // | :          |
90  // | X1         |
91  // | D7         |
92  // |  :         |
93  // | D0         |
94  // |            |    padding
95  // | Method*    |  <- sp
96  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
97  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
98  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
99  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
100  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
101      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
102  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
103      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
104  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
105      arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
106  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
107    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
108  }
109#elif defined(__mips__)
110  // The callee save frame is pointed to by SP.
111  // | argN       |  |
112  // | ...        |  |
113  // | arg4       |  |
114  // | arg3 spill |  |  Caller's frame
115  // | arg2 spill |  |
116  // | arg1 spill |  |
117  // | Method*    | ---
118  // | RA         |
119  // | ...        |    callee saves
120  // | A3         |    arg3
121  // | A2         |    arg2
122  // | A1         |    arg1
123  // | A0/Method* |  <- sp
124  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
125  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
126  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
127  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
128  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
129  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 16;  // Offset of first GPR arg.
130  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
131  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
132    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
133  }
134#elif defined(__i386__)
135  // The callee save frame is pointed to by SP.
136  // | argN        |  |
137  // | ...         |  |
138  // | arg4        |  |
139  // | arg3 spill  |  |  Caller's frame
140  // | arg2 spill  |  |
141  // | arg1 spill  |  |
142  // | Method*     | ---
143  // | Return      |
144  // | EBP,ESI,EDI |    callee saves
145  // | EBX         |    arg3
146  // | EDX         |    arg2
147  // | ECX         |    arg1
148  // | EAX/Method* |  <- sp
149  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
150  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
151  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
152  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
153  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
154  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
155  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
156  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
157    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
158  }
159#elif defined(__x86_64__)
160  // The callee save frame is pointed to by SP.
161  // | argN            |  |
162  // | ...             |  |
163  // | reg. arg spills |  |  Caller's frame
164  // | Method*         | ---
165  // | Return          |
166  // | R15             |    callee save
167  // | R14             |    callee save
168  // | R13             |    callee save
169  // | R12             |    callee save
170  // | R9              |    arg5
171  // | R8              |    arg4
172  // | RSI/R6          |    arg1
173  // | RBP/R5          |    callee save
174  // | RBX/R3          |    callee save
175  // | RDX/R2          |    arg2
176  // | RCX/R1          |    arg3
177  // | XMM7            |    float arg 8
178  // | XMM6            |    float arg 7
179  // | XMM5            |    float arg 6
180  // | XMM4            |    float arg 5
181  // | XMM3            |    float arg 4
182  // | XMM2            |    float arg 3
183  // | XMM1            |    float arg 2
184  // | XMM0            |    float arg 1
185  // | Padding         |
186  // | RDI/Method*     |  <- sp
187  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
188  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
189  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
190  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
191  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
192  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
193  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
194  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
195    switch (gpr_index) {
196      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
197      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
198      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
199      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
200      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
201      default:
202      LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
203      return 0;
204    }
205  }
206#else
207#error "Unsupported architecture"
208#endif
209
210 public:
211  // Special handling for proxy methods. Proxy methods are instance methods so the
212  // 'this' object is the 1st argument. They also have the same frame layout as the
213  // kRefsAndArgs runtime method. Since 'this' is a reference, it is located in the
214  // 1st GPR.
215  static mirror::Object* GetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
216      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
217    CHECK(sp->AsMirrorPtr()->IsProxyMethod());
218    CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, sp->AsMirrorPtr()->GetFrameSizeInBytes());
219    CHECK_GT(kNumQuickGprArgs, 0u);
220    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
221    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
222        GprIndexToGprOffset(kThisGprIndex);
223    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
224    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
225  }
226
227  static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
228      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
229    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
230    uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
231    return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
232  }
233
234  // For the given quick RefsAndArgs callee-save frame, return the caller's PC.
235  static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
236      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
237    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
238    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
239    return *reinterpret_cast<uintptr_t*>(lr);
240  }
241
242  QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
243                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
244          is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
245          gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
246          fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
247          stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
248              + sizeof(StackReference<mirror::ArtMethod>)),  // Skip StackReference<ArtMethod>.
249          gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
250          cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
251    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
252                  "Number of Quick FPR arguments unexpected");
253    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
254                  "Double alignment unexpected");
255    // For register alignment, we want to assume that counters (fpr_double_index_) are even iff the
256    // next register is even.
257    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
258                  "Number of Quick FPR arguments not even");
259  }
260
261  virtual ~QuickArgumentVisitor() {}
262
263  virtual void Visit() = 0;
264
265  Primitive::Type GetParamPrimitiveType() const {
266    return cur_type_;
267  }
268
269  uint8_t* GetParamAddress() const {
270    if (!kQuickSoftFloatAbi) {
271      Primitive::Type type = GetParamPrimitiveType();
272      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
273        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
274          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
275            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
276          }
277        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
278          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
279        }
280        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
281      }
282    }
283    if (gpr_index_ < kNumQuickGprArgs) {
284      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
285    }
286    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
287  }
288
289  bool IsSplitLongOrDouble() const {
290    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
291      return is_split_long_or_double_;
292    } else {
293      return false;  // An optimization for when GPRs and FPRs are 64bit.
294    }
295  }
296
297  bool IsParamAReference() const {
298    return GetParamPrimitiveType() == Primitive::kPrimNot;
299  }
300
301  bool IsParamALongOrDouble() const {
302    Primitive::Type type = GetParamPrimitiveType();
303    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
304  }
305
306  uint64_t ReadSplitLongParam() const {
307    // The split long is always available through the stack.
308    return *reinterpret_cast<uint64_t*>(stack_args_
309        + stack_index_ * kBytesStackArgLocation);
310  }
311
312  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
313    // (a) 'stack_args_' should point to the method's first argument.
314    // (b) Whatever the argument type, 'stack_index_' should be
315    //     advanced with every visit.
316    gpr_index_ = 0;
317    fpr_index_ = 0;
318    if (kQuickDoubleRegAlignedFloatBackFilled) {
319      fpr_double_index_ = 0;
320    }
321    stack_index_ = 0;
322    if (!is_static_) {  // Handle the 'this' argument.
323      cur_type_ = Primitive::kPrimNot;
324      is_split_long_or_double_ = false;
325      Visit();
326      stack_index_++;
327      if (kNumQuickGprArgs > 0) {
328        gpr_index_++;
329      }
330    }
331    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
332      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
333      switch (cur_type_) {
334        case Primitive::kPrimNot:
335        case Primitive::kPrimBoolean:
336        case Primitive::kPrimByte:
337        case Primitive::kPrimChar:
338        case Primitive::kPrimShort:
339        case Primitive::kPrimInt:
340          is_split_long_or_double_ = false;
341          Visit();
342          stack_index_++;
343          if (gpr_index_ < kNumQuickGprArgs) {
344            gpr_index_++;
345          }
346          break;
347        case Primitive::kPrimFloat:
348          is_split_long_or_double_ = false;
349          Visit();
350          stack_index_++;
351          if (kQuickSoftFloatAbi) {
352            if (gpr_index_ < kNumQuickGprArgs) {
353              gpr_index_++;
354            }
355          } else {
356            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
357              fpr_index_++;
358              if (kQuickDoubleRegAlignedFloatBackFilled) {
359                // Double should not overlap with float.
360                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
361                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
362                // Float should not overlap with double.
363                if (fpr_index_ % 2 == 0) {
364                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
365                }
366              }
367            }
368          }
369          break;
370        case Primitive::kPrimDouble:
371        case Primitive::kPrimLong:
372          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
373            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
374                ((gpr_index_ + 1) == kNumQuickGprArgs);
375            Visit();
376            if (kBytesStackArgLocation == 4) {
377              stack_index_+= 2;
378            } else {
379              CHECK_EQ(kBytesStackArgLocation, 8U);
380              stack_index_++;
381            }
382            if (gpr_index_ < kNumQuickGprArgs) {
383              gpr_index_++;
384              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
385                if (gpr_index_ < kNumQuickGprArgs) {
386                  gpr_index_++;
387                }
388              }
389            }
390          } else {
391            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
392                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
393            Visit();
394            if (kBytesStackArgLocation == 4) {
395              stack_index_+= 2;
396            } else {
397              CHECK_EQ(kBytesStackArgLocation, 8U);
398              stack_index_++;
399            }
400            if (kQuickDoubleRegAlignedFloatBackFilled) {
401              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
402                fpr_double_index_ += 2;
403                // Float should not overlap with double.
404                if (fpr_index_ % 2 == 0) {
405                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
406                }
407              }
408            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
409              fpr_index_++;
410              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
411                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
412                  fpr_index_++;
413                }
414              }
415            }
416          }
417          break;
418        default:
419          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
420      }
421    }
422  }
423
424 protected:
425  const bool is_static_;
426  const char* const shorty_;
427  const uint32_t shorty_len_;
428
429 private:
430  uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
431  uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
432  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
433  uint32_t gpr_index_;  // Index into spilled GPRs.
434  // Index into spilled FPRs.
435  // When kQuickDoubleRegAlignedFloatBackFilled is set, it may index a hole while fpr_double_index_
436  // holds a higher register number.
437  uint32_t fpr_index_;
438  // Index into spilled FPRs for aligned double.
439  // Only used when kQuickDoubleRegAlignedFloatBackFilled is set. Next available double register,
440  // indexed in terms of singles; it may be behind fpr_index_.
441  uint32_t fpr_double_index_;
442  uint32_t stack_index_;  // Index into arguments on the stack.
443  // The current type of argument during VisitArguments.
444  Primitive::Type cur_type_;
445  // Does a 64bit parameter straddle the register and stack arguments?
446  bool is_split_long_or_double_;
447};
448
449// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
450// allows use of the QuickArgumentVisitor constants without moving all the code into its own module.
451extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
452    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
453  return QuickArgumentVisitor::GetProxyThisObject(sp);
454}
455
456// Visits arguments on the stack placing them into the shadow frame.
457class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
458 public:
459  BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
460                               const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
461                               size_t first_arg_reg) :
462      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
463
464  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
465
466 private:
467  ShadowFrame* const sf_;
468  uint32_t cur_reg_;
469
470  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
471};
472
473void BuildQuickShadowFrameVisitor::Visit() {
474  Primitive::Type type = GetParamPrimitiveType();
475  switch (type) {
476    case Primitive::kPrimLong:  // Fall-through.
477    case Primitive::kPrimDouble:
478      if (IsSplitLongOrDouble()) {
479        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
480      } else {
481        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
482      }
483      ++cur_reg_;
484      break;
485    case Primitive::kPrimNot: {
486        StackReference<mirror::Object>* stack_ref =
487            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
488        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
489      }
490      break;
491    case Primitive::kPrimBoolean:  // Fall-through.
492    case Primitive::kPrimByte:     // Fall-through.
493    case Primitive::kPrimChar:     // Fall-through.
494    case Primitive::kPrimShort:    // Fall-through.
495    case Primitive::kPrimInt:      // Fall-through.
496    case Primitive::kPrimFloat:
497      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
498      break;
499    case Primitive::kPrimVoid:
500      LOG(FATAL) << "UNREACHABLE";
501      UNREACHABLE();
502  }
503  ++cur_reg_;
504}
505
506extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
507                                                StackReference<mirror::ArtMethod>* sp)
508    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
509  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
510  // frame.
511  ScopedQuickEntrypointChecks sqec(self);
512
513  if (method->IsAbstract()) {
514    ThrowAbstractMethodError(method);
515    return 0;
516  } else {
517    DCHECK(!method->IsNative()) << PrettyMethod(method);
518    const char* old_cause = self->StartAssertNoThreadSuspension(
519        "Building interpreter shadow frame");
520    const DexFile::CodeItem* code_item = method->GetCodeItem();
521    DCHECK(code_item != nullptr) << PrettyMethod(method);
522    uint16_t num_regs = code_item->registers_size_;
523    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
524    // No last shadow coming from quick.
525    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
526    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
527    uint32_t shorty_len = 0;
528    const char* shorty = method->GetShorty(&shorty_len);
529    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
530                                                      shadow_frame, first_arg_reg);
531    shadow_frame_builder.VisitArguments();
532    const bool needs_initialization =
533        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
534    // Push a transition back into managed code onto the linked list in thread.
535    ManagedStack fragment;
536    self->PushManagedStackFragment(&fragment);
537    self->PushShadowFrame(shadow_frame);
538    self->EndAssertNoThreadSuspension(old_cause);
539
540    if (needs_initialization) {
541      // Ensure static method's class is initialized.
542      StackHandleScope<1> hs(self);
543      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
544      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
545        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
546        self->PopManagedStackFragment(fragment);
547        return 0;
548      }
549    }
550    JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
551    // Pop transition.
552    self->PopManagedStackFragment(fragment);
553    // No need to restore the args since the method has already been run by the interpreter.
554    return result.GetJ();
555  }
556}
557
558// Visits arguments on the stack, placing them into the args vector; Object* arguments are
559// converted to jobjects.
560class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
561 public:
562  BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
563                            const char* shorty, uint32_t shorty_len,
564                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
565      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
566
567  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
568
569  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
570
571 private:
572  ScopedObjectAccessUnchecked* const soa_;
573  std::vector<jvalue>* const args_;
574  // References which we must update when exiting in case the GC moved the objects.
575  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
576
577  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
578};
579
580void BuildQuickArgumentVisitor::Visit() {
581  jvalue val;
582  Primitive::Type type = GetParamPrimitiveType();
583  switch (type) {
584    case Primitive::kPrimNot: {
585      StackReference<mirror::Object>* stack_ref =
586          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
587      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
588      references_.push_back(std::make_pair(val.l, stack_ref));
589      break;
590    }
591    case Primitive::kPrimLong:  // Fall-through.
592    case Primitive::kPrimDouble:
593      if (IsSplitLongOrDouble()) {
594        val.j = ReadSplitLongParam();
595      } else {
596        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
597      }
598      break;
599    case Primitive::kPrimBoolean:  // Fall-through.
600    case Primitive::kPrimByte:     // Fall-through.
601    case Primitive::kPrimChar:     // Fall-through.
602    case Primitive::kPrimShort:    // Fall-through.
603    case Primitive::kPrimInt:      // Fall-through.
604    case Primitive::kPrimFloat:
605      val.i = *reinterpret_cast<jint*>(GetParamAddress());
606      break;
607    case Primitive::kPrimVoid:
608      LOG(FATAL) << "UNREACHABLE";
609      UNREACHABLE();
610  }
611  args_->push_back(val);
612}
613
614void BuildQuickArgumentVisitor::FixupReferences() {
615  // Fixup any references which may have changed.
616  for (const auto& pair : references_) {
617    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
618    soa_->Env()->DeleteLocalRef(pair.first);
619  }
620}
621
622// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method,
623// which is responsible for recording callee save registers. We explicitly place the incoming
624// reference arguments into jobjects (so they survive GC). We then invoke the invocation handler, a
625// field within the proxy object, which boxes the primitive arguments and deals with error cases.
626extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
627                                               mirror::Object* receiver,
628                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
629    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
630  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
631  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
632  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
633  const char* old_cause =
634      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
635  // Register the top of the managed stack, making the stack crawlable.
636  DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
637  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
638            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
639      << PrettyMethod(proxy_method);
640  self->VerifyStack();
641  // Start new JNI local reference state.
642  JNIEnvExt* env = self->GetJniEnv();
643  ScopedObjectAccessUnchecked soa(env);
644  ScopedJniEnvLocalRefState env_state(env);
645  // Create a local reference copy of the receiver.
646  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
647
648  // Place the arguments into the args vector and remove the receiver.
649  mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
650  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
651                                       << PrettyMethod(non_proxy_method);
652  std::vector<jvalue> args;
653  uint32_t shorty_len = 0;
654  const char* shorty = proxy_method->GetShorty(&shorty_len);
655  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
656
657  local_ref_visitor.VisitArguments();
658  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
659  args.erase(args.begin());
660
661  // Convert proxy method into expected interface method.
662  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
663  DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
664  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
665  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
666
667  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
668  // that performs allocations.
669  self->EndAssertNoThreadSuspension(old_cause);
670  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
671  // Restore references which might have moved.
672  local_ref_visitor.FixupReferences();
673  return result.GetJ();
674}
675
676// Reads object references held in arguments from quick frames and places them in JNI local
677// references so they don't get garbage collected.
678class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
679 public:
680  RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
681                               const char* shorty, uint32_t shorty_len,
682                               ScopedObjectAccessUnchecked* soa) :
683      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
684
685  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
686
687  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
688
689 private:
690  ScopedObjectAccessUnchecked* const soa_;
691  // References which we must update when exiting in case the GC moved the objects.
692  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
693
694  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
695};
696
697void RememberForGcArgumentVisitor::Visit() {
698  if (IsParamAReference()) {
699    StackReference<mirror::Object>* stack_ref =
700        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
701    jobject reference =
702        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
703    references_.push_back(std::make_pair(reference, stack_ref));
704  }
705}
706
707void RememberForGcArgumentVisitor::FixupReferences() {
708  // Fixup any references which may have changed.
709  for (const auto& pair : references_) {
710    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
711    soa_->Env()->DeleteLocalRef(pair.first);
712  }
713}
714
715// Lazily resolve a method for quick. Called by stub code.
716extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
717                                                    mirror::Object* receiver,
718                                                    Thread* self,
719                                                    StackReference<mirror::ArtMethod>* sp)
720    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
721  ScopedQuickEntrypointChecks sqec(self);
722  // Start new JNI local reference state
723  JNIEnvExt* env = self->GetJniEnv();
724  ScopedObjectAccessUnchecked soa(env);
725  ScopedJniEnvLocalRefState env_state(env);
726  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
727
728  // Compute details about the called method (avoid GCs)
729  ClassLinker* linker = Runtime::Current()->GetClassLinker();
730  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
731  InvokeType invoke_type;
732  MethodReference called_method(nullptr, 0);
733  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
734  if (!called_method_known_on_entry) {
735    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
736    const DexFile::CodeItem* code;
737    called_method.dex_file = caller->GetDexFile();
738    code = caller->GetCodeItem();
739    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
740    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
741    Instruction::Code instr_code = instr->Opcode();
742    bool is_range;
743    switch (instr_code) {
744      case Instruction::INVOKE_DIRECT:
745        invoke_type = kDirect;
746        is_range = false;
747        break;
748      case Instruction::INVOKE_DIRECT_RANGE:
749        invoke_type = kDirect;
750        is_range = true;
751        break;
752      case Instruction::INVOKE_STATIC:
753        invoke_type = kStatic;
754        is_range = false;
755        break;
756      case Instruction::INVOKE_STATIC_RANGE:
757        invoke_type = kStatic;
758        is_range = true;
759        break;
760      case Instruction::INVOKE_SUPER:
761        invoke_type = kSuper;
762        is_range = false;
763        break;
764      case Instruction::INVOKE_SUPER_RANGE:
765        invoke_type = kSuper;
766        is_range = true;
767        break;
768      case Instruction::INVOKE_VIRTUAL:
769        invoke_type = kVirtual;
770        is_range = false;
771        break;
772      case Instruction::INVOKE_VIRTUAL_RANGE:
773        invoke_type = kVirtual;
774        is_range = true;
775        break;
776      case Instruction::INVOKE_INTERFACE:
777        invoke_type = kInterface;
778        is_range = false;
779        break;
780      case Instruction::INVOKE_INTERFACE_RANGE:
781        invoke_type = kInterface;
782        is_range = true;
783        break;
784      default:
785        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
786        UNREACHABLE();
787    }
788    called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
789  } else {
790    invoke_type = kStatic;
791    called_method.dex_file = called->GetDexFile();
792    called_method.dex_method_index = called->GetDexMethodIndex();
793  }
794  uint32_t shorty_len;
795  const char* shorty =
796      called_method.dex_file->GetMethodShorty(
797          called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
798  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
799  visitor.VisitArguments();
800  self->EndAssertNoThreadSuspension(old_cause);
801  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
802  // Resolve method filling in dex cache.
803  if (!called_method_known_on_entry) {
804    StackHandleScope<1> hs(self);
805    mirror::Object* dummy = nullptr;
806    HandleWrapper<mirror::Object> h_receiver(
807        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
808    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
809    called = linker->ResolveMethod(self, called_method.dex_method_index, &caller, invoke_type);
810  }
811  const void* code = nullptr;
812  if (LIKELY(!self->IsExceptionPending())) {
813    // Incompatible class change should have been handled in resolve method.
814    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
815        << PrettyMethod(called) << " " << invoke_type;
816    if (virtual_or_interface) {
817      // Refine called method based on receiver.
818      CHECK(receiver != nullptr) << invoke_type;
819
820      mirror::ArtMethod* orig_called = called;
821      if (invoke_type == kVirtual) {
822        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
823      } else {
824        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
825      }
826
827      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
828                               << PrettyTypeOf(receiver) << " "
829                               << invoke_type << " " << orig_called->GetVtableIndex();
830
831      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
832      // of the sharpened method, avoiding dirtying the dex cache if possible.
833      // Note: called_method.dex_method_index references the dex method before the
834      // FindVirtualMethodFor... call. This is OK for FindDexMethodIndexInOtherDexFile, which only
835      // cares about the name and signature.
836      uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
837      if (!called->HasSameDexCacheResolvedMethods(caller)) {
838        // Calling from one dex file to another, need to compute the method index appropriate to
839        // the caller's dex file. Since we get here only if the original called was a runtime
840        // method, we've got the correct dex_file and a dex_method_idx from above.
841        DCHECK(!called_method_known_on_entry);
842        DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
843        const DexFile* caller_dex_file = called_method.dex_file;
844        uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
845        update_dex_cache_method_index =
846            called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
847                                                     caller_method_name_and_sig_index);
848      }
849      if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
850          (caller->GetDexCacheResolvedMethod(update_dex_cache_method_index) != called)) {
851        caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called);
852      }
853    }
854    // Ensure that the called method's class is initialized.
855    StackHandleScope<1> hs(soa.Self());
856    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
857    linker->EnsureInitialized(soa.Self(), called_class, true, true);
858    if (LIKELY(called_class->IsInitialized())) {
859      code = called->GetEntryPointFromQuickCompiledCode();
860    } else if (called_class->IsInitializing()) {
861      if (invoke_type == kStatic) {
862        // Class is still initializing, go to oat and grab code (trampoline must be left in place
863        // until class is initialized to stop races between threads).
864        code = linker->GetQuickOatCodeFor(called);
865      } else {
866        // No trampoline for non-static methods.
867        code = called->GetEntryPointFromQuickCompiledCode();
868      }
869    } else {
870      DCHECK(called_class->IsErroneous());
871    }
872  }
873  CHECK_EQ(code == nullptr, self->IsExceptionPending());
874  // Fix up any locally saved objects that may have moved during a GC.
875  visitor.FixupReferences();
876  // Place the called method in the callee-save frame as the first argument to the quick method.
877  sp->Assign(called);
878  return code;
879}
880
881/*
882 * This class uses a couple of observations to unite the different calling conventions through
883 * a few constants.
884 *
885 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
886 *    possible alignment.
887 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
888 *    types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to denote
889 *    when we have to split things.
890 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
891 *    and we can use Int handling directly.
892 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code
893 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
894 *    extension should be compatible with Aarch64, which mandates copying the available bits
895 *    into LSB and leaving the rest unspecified.
896 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
897 *    the stack.
898 * 6) There is only little endian.
899 *
900 *
901 * Actual work is supposed to be done in a delegate of the template type. The interface is as
902 * follows:
903 *
904 * void PushGpr(uintptr_t):   Add a value for the next GPR
905 *
906 * void PushFpr4(float):      Add a value for the next FPR of size 32b. It is only called if we need
907 *                            padding, that is, when the architecture is 32b and aligns 64b values.
908 *
909 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b; it's the callee's job to
910 *                            split it if necessary. The current state will have been aligned, if
911 *                            necessary.
912 *
913 * void PushStack(uintptr_t): Push a value to the stack.
914 *
915 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be called
916 *                                          with nullptr, as this might be important for null
917 *                                          initialization. Must return the jobject, that is, the
918 *                                          reference to the entry in the HandleScope (nullptr if necessary).
919 *
920 */
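// An illustrative sketch (not part of the runtime) of a minimal delegate satisfying the interface
// described above. The name CountingDelegate and its counters are hypothetical; the real delegate
// used in this file is ComputeNativeCallFrameSize (see Walk below).
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_slots_++; }
//     uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
//       handles_++;
//       return reinterpret_cast<uintptr_t>(nullptr);  // No real HandleScope entry in this sketch.
//     }
//    private:
//     size_t gprs_ = 0, fprs_ = 0, stack_slots_ = 0, handles_ = 0;
//   };
//
//   // Driving the state machine: each Advance* consumes a register if one is free, otherwise a
//   // stack slot.
//   CountingDelegate delegate;
//   BuildNativeCallFrameStateMachine<CountingDelegate> sm(&delegate);
//   sm.AdvanceInt(0);      // Consumes a GPR, or a stack slot once the GPRs run out.
//   sm.AdvanceDouble(0);   // Consumes FPR(s), or GPR(s)/stack slots on a soft-float ABI.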
921template<class T> class BuildNativeCallFrameStateMachine {
922 public:
923#if defined(__arm__)
924  // TODO: These are all dummy values!
925  static constexpr bool kNativeSoftFloatAbi = true;
926  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
927  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
928
929  static constexpr size_t kRegistersNeededForLong = 2;
930  static constexpr size_t kRegistersNeededForDouble = 2;
931  static constexpr bool kMultiRegistersAligned = true;
932  static constexpr bool kMultiRegistersWidened = false;
933  static constexpr bool kAlignLongOnStack = true;
934  static constexpr bool kAlignDoubleOnStack = true;
935#elif defined(__aarch64__)
936  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
937  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
938  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
939
940  static constexpr size_t kRegistersNeededForLong = 1;
941  static constexpr size_t kRegistersNeededForDouble = 1;
942  static constexpr bool kMultiRegistersAligned = false;
943  static constexpr bool kMultiRegistersWidened = false;
944  static constexpr bool kAlignLongOnStack = false;
945  static constexpr bool kAlignDoubleOnStack = false;
946#elif defined(__mips__)
947  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
948  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
949  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
950
951  static constexpr size_t kRegistersNeededForLong = 2;
952  static constexpr size_t kRegistersNeededForDouble = 2;
953  static constexpr bool kMultiRegistersAligned = true;
954  static constexpr bool kMultiRegistersWidened = true;
955  static constexpr bool kAlignLongOnStack = true;
956  static constexpr bool kAlignDoubleOnStack = true;
957#elif defined(__i386__)
958  // TODO: Check these!
959  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
960  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
961  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
962
963  static constexpr size_t kRegistersNeededForLong = 2;
964  static constexpr size_t kRegistersNeededForDouble = 2;
965  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyway
966  static constexpr bool kMultiRegistersWidened = false;
967  static constexpr bool kAlignLongOnStack = false;
968  static constexpr bool kAlignDoubleOnStack = false;
969#elif defined(__x86_64__)
970  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
971  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
972  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
973
974  static constexpr size_t kRegistersNeededForLong = 1;
975  static constexpr size_t kRegistersNeededForDouble = 1;
976  static constexpr bool kMultiRegistersAligned = false;
977  static constexpr bool kMultiRegistersWidened = false;
978  static constexpr bool kAlignLongOnStack = false;
979  static constexpr bool kAlignDoubleOnStack = false;
980#else
981#error "Unsupported architecture"
982#endif
983
984 public:
985  explicit BuildNativeCallFrameStateMachine(T* delegate)
986      : gpr_index_(kNumNativeGprArgs),
987        fpr_index_(kNumNativeFprArgs),
988        stack_entries_(0),
989        delegate_(delegate) {
990    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
991    // the next register is even; counting down is just to make the compiler happy...
992    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
993    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
994  }
995
996  virtual ~BuildNativeCallFrameStateMachine() {}
997
998  bool HavePointerGpr() const {
999    return gpr_index_ > 0;
1000  }
1001
1002  void AdvancePointer(const void* val) {
1003    if (HavePointerGpr()) {
1004      gpr_index_--;
1005      PushGpr(reinterpret_cast<uintptr_t>(val));
1006    } else {
1007      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
1008      PushStack(reinterpret_cast<uintptr_t>(val));
1009      gpr_index_ = 0;
1010    }
1011  }
1012
1013  bool HaveHandleScopeGpr() const {
1014    return gpr_index_ > 0;
1015  }
1016
1017  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1018    uintptr_t handle = PushHandle(ptr);
1019    if (HaveHandleScopeGpr()) {
1020      gpr_index_--;
1021      PushGpr(handle);
1022    } else {
1023      stack_entries_++;
1024      PushStack(handle);
1025      gpr_index_ = 0;
1026    }
1027  }
1028
1029  bool HaveIntGpr() const {
1030    return gpr_index_ > 0;
1031  }
1032
1033  void AdvanceInt(uint32_t val) {
1034    if (HaveIntGpr()) {
1035      gpr_index_--;
1036      PushGpr(val);
1037    } else {
1038      stack_entries_++;
1039      PushStack(val);
1040      gpr_index_ = 0;
1041    }
1042  }
1043
1044  bool HaveLongGpr() const {
1045    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1046  }
1047
1048  bool LongGprNeedsPadding() const {
1049    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
1050        kAlignLongOnStack &&                  // and when it needs alignment
1051        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
1052  }
1053
1054  bool LongStackNeedsPadding() const {
1055    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
1056        kAlignLongOnStack &&                  // and when it needs 8B alignment
1057        (stack_entries_ & 1) == 1;            // counter is odd
1058  }
1059
1060  void AdvanceLong(uint64_t val) {
1061    if (HaveLongGpr()) {
1062      if (LongGprNeedsPadding()) {
1063        PushGpr(0);
1064        gpr_index_--;
1065      }
1066      if (kRegistersNeededForLong == 1) {
1067        PushGpr(static_cast<uintptr_t>(val));
1068      } else {
1069        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1070        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1071      }
1072      gpr_index_ -= kRegistersNeededForLong;
1073    } else {
1074      if (LongStackNeedsPadding()) {
1075        PushStack(0);
1076        stack_entries_++;
1077      }
1078      if (kRegistersNeededForLong == 1) {
1079        PushStack(static_cast<uintptr_t>(val));
1080        stack_entries_++;
1081      } else {
1082        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1083        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1084        stack_entries_ += 2;
1085      }
1086      gpr_index_ = 0;
1087    }
1088  }
1089
1090  bool HaveFloatFpr() const {
1091    return fpr_index_ > 0;
1092  }
1093
1094  void AdvanceFloat(float val) {
1095    if (kNativeSoftFloatAbi) {
1096      AdvanceInt(bit_cast<float, uint32_t>(val));
1097    } else {
1098      if (HaveFloatFpr()) {
1099        fpr_index_--;
1100        if (kRegistersNeededForDouble == 1) {
1101          if (kMultiRegistersWidened) {
1102            PushFpr8(bit_cast<double, uint64_t>(val));
1103          } else {
1104            // No widening, just use the bits.
1105            PushFpr8(bit_cast<float, uint64_t>(val));
1106          }
1107        } else {
1108          PushFpr4(val);
1109        }
1110      } else {
1111        stack_entries_++;
1112        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
1113          // Need to widen before storing: Note the "double" in the template instantiation.
1114          // Note: We need to jump through those hoops to make the compiler happy.
1115          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
1116          PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
1117        } else {
1118          PushStack(bit_cast<float, uintptr_t>(val));
1119        }
1120        fpr_index_ = 0;
1121      }
1122    }
1123  }
1124
1125  bool HaveDoubleFpr() const {
1126    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
1127  }
1128
1129  bool DoubleFprNeedsPadding() const {
1130    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
1131        kAlignDoubleOnStack &&                  // and when it needs alignment
1132        (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
1133  }
1134
1135  bool DoubleStackNeedsPadding() const {
1136    return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
1137        kAlignDoubleOnStack &&                  // and when it needs 8B alignment
1138        (stack_entries_ & 1) == 1;              // counter is odd
1139  }
1140
1141  void AdvanceDouble(uint64_t val) {
1142    if (kNativeSoftFloatAbi) {
1143      AdvanceLong(val);
1144    } else {
1145      if (HaveDoubleFpr()) {
1146        if (DoubleFprNeedsPadding()) {
1147          PushFpr4(0);
1148          fpr_index_--;
1149        }
1150        PushFpr8(val);
1151        fpr_index_ -= kRegistersNeededForDouble;
1152      } else {
1153        if (DoubleStackNeedsPadding()) {
1154          PushStack(0);
1155          stack_entries_++;
1156        }
1157        if (kRegistersNeededForDouble == 1) {
1158          PushStack(static_cast<uintptr_t>(val));
1159          stack_entries_++;
1160        } else {
1161          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
1162          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
1163          stack_entries_ += 2;
1164        }
1165        fpr_index_ = 0;
1166      }
1167    }
1168  }
1169
1170  uint32_t GetStackEntries() const {
1171    return stack_entries_;
1172  }
1173
1174  uint32_t GetNumberOfUsedGprs() const {
1175    return kNumNativeGprArgs - gpr_index_;
1176  }
1177
1178  uint32_t GetNumberOfUsedFprs() const {
1179    return kNumNativeFprArgs - fpr_index_;
1180  }
1181
1182 private:
1183  void PushGpr(uintptr_t val) {
1184    delegate_->PushGpr(val);
1185  }
1186  void PushFpr4(float val) {
1187    delegate_->PushFpr4(val);
1188  }
1189  void PushFpr8(uint64_t val) {
1190    delegate_->PushFpr8(val);
1191  }
1192  void PushStack(uintptr_t val) {
1193    delegate_->PushStack(val);
1194  }
1195  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1196    return delegate_->PushHandle(ref);
1197  }
1198
1199  uint32_t gpr_index_;      // Number of free GPRs
1200  uint32_t fpr_index_;      // Number of free FPRs
1201  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
1202                            // extended
1203  T* const delegate_;             // What Push implementation gets called
1204};
1205
1206// Computes the sizes of register stacks and call stack area. Handling of references can be extended
1207// in subclasses.
1208//
1209// To handle native pointers, use "L" in the shorty for an object reference, which simulates
1210// them with handles.
1211class ComputeNativeCallFrameSize {
1212 public:
1213  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
1214
1215  virtual ~ComputeNativeCallFrameSize() {}
1216
1217  uint32_t GetStackSize() const {
1218    return num_stack_entries_ * sizeof(uintptr_t);
1219  }
1220
1221  uint8_t* LayoutCallStack(uint8_t* sp8) const {
1222    sp8 -= GetStackSize();
1223    // Align by kStackAlignment.
1224    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1225    return sp8;
1226  }
1227
1228  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
1229      const {
1230    // Assumption is OK right now, as we have soft-float arm
1231    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1232    sp8 -= fregs * sizeof(uintptr_t);
1233    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
1234    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1235    sp8 -= iregs * sizeof(uintptr_t);
1236    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
1237    return sp8;
1238  }
1239
1240  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
1241                            uint32_t** start_fpr) const {
1242    // Native call stack.
1243    sp8 = LayoutCallStack(sp8);
1244    *start_stack = reinterpret_cast<uintptr_t*>(sp8);
1245
1246    // Put fprs and gprs below.
1247    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
1248
1249    // Return the new bottom.
1250    return sp8;
1251  }
1252
1253  virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
1254      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1255    UNUSED(sm);
1256  }
1257
1258  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1259    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1260
1261    WalkHeader(&sm);
1262
1263    for (uint32_t i = 1; i < shorty_len; ++i) {
1264      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
1265      switch (cur_type_) {
1266        case Primitive::kPrimNot:
1267          // TODO: fix abuse of mirror types.
1268          sm.AdvanceHandleScope(
1269              reinterpret_cast<mirror::Object*>(0x12345678));
1270          break;
1271
1272        case Primitive::kPrimBoolean:
1273        case Primitive::kPrimByte:
1274        case Primitive::kPrimChar:
1275        case Primitive::kPrimShort:
1276        case Primitive::kPrimInt:
1277          sm.AdvanceInt(0);
1278          break;
1279        case Primitive::kPrimFloat:
1280          sm.AdvanceFloat(0);
1281          break;
1282        case Primitive::kPrimDouble:
1283          sm.AdvanceDouble(0);
1284          break;
1285        case Primitive::kPrimLong:
1286          sm.AdvanceLong(0);
1287          break;
1288        default:
1289          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
1290          UNREACHABLE();
1291      }
1292    }
1293
1294    num_stack_entries_ = sm.GetStackEntries();
1295  }
1296
1297  void PushGpr(uintptr_t /* val */) {
1298    // Not optimizing registers, yet.
1299  }
1300
1301  void PushFpr4(float /* val */) {
1302    // Not optimizing registers, yet.
1303  }
1304
1305  void PushFpr8(uint64_t /* val */) {
1306    // Not optimizing registers, yet.
1307  }
1308
1309  void PushStack(uintptr_t /* val */) {
1310    // Counting of stack entries is done by the state machine; nothing to record here.
1311  }
1312
1313  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
1314    return reinterpret_cast<uintptr_t>(nullptr);
1315  }
1316
1317 protected:
1318  uint32_t num_stack_entries_;
1319};
1320
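// Computes the complete frame size for a Generic JNI call: on top of the native call frame it
// counts handle-scope references (via PushHandle()) and, in ComputeLayout(), lays out the handle
// scope, the relocated method pointer, the JNI cookie and the native call staging areas.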
1321class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
1322 public:
1323  ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
1324
1325  // Lays out the callee-save frame. Assumes that the not-yet-fixed-up frame corresponding to
1326  // RefsAndArgs is at *m = sp. Will update *m to point to the bottom of the new save frame.
1327  //
1328  // Note: assumes Walk() has been run before, so that num_handle_scope_references_ is valid.
1329  void LayoutCalleeSaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
1330                             HandleScope** handle_scope)
1331      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1332    mirror::ArtMethod* method = (*m)->AsMirrorPtr();
1333
1334    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
1335
1336    // First, fix up the layout of the callee-save frame.
1337    // We have to squeeze in the HandleScope, and relocate the method pointer.
1338
1339    // "Free" the slot for the method.
1340    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
1341
1342    // Under the callee saves put handle scope and new method stack reference.
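    // Resulting layout, sketched top-down (addresses decreasing):
    //   | callee saves (the old Method* slot is reused)  |
    //   | (possible alignment padding)                   |
    //   | HandleScope                                    |
    //   | StackReference<ArtMethod> (new Method* slot)   | <- new *m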
1343    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
1344    size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
1345
1346    sp8 -= scope_and_method;
1347    // Align by kStackAlignment.
1348    sp8 = reinterpret_cast<uint8_t*>(RoundDown(
1349        reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1350
1351    uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
1352    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
1353                                        num_handle_scope_references_);
1354
1355    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
1356    uint8_t* method_pointer = sp8;
1357    StackReference<mirror::ArtMethod>* new_method_ref =
1358        reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
1359    new_method_ref->Assign(method);
1360    *m = new_method_ref;
1361  }
1362
1363  // Adds space for the cookie. Note: may leave stack unaligned.
1364  void LayoutCookie(uint8_t** sp) const {
1365    // Reference cookie and padding
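    // The cookie itself is a uint32_t (stored via *(sp32 - 1) in artQuickGenericJniTrampoline);
    // the remaining bytes are padding.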
1366    *sp -= 8;
1367  }
1368
1369  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
1370  // Returns the new bottom. Note: this may be unaligned.
1371  uint8_t* LayoutJNISaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
1372                              HandleScope** handle_scope)
1373      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1374    // First, fix up the layout of the callee-save frame.
1375    // We have to squeeze in the HandleScope, and relocate the method pointer.
1376    LayoutCalleeSaveFrame(self, m, sp, handle_scope);
1377
1378    // The bottom of the callee-save frame is now where the method is, *m.
1379    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
1380
1381    // Add space for cookie.
1382    LayoutCookie(&sp8);
1383
1384    return sp8;
1385  }
1386
1387  // WARNING: After this, *sp won't be pointing to the method anymore!
1388  uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
1389                         const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
1390                         uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
1391      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1392    Walk(shorty, shorty_len);
1393
1394    // JNI part.
1395    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
1396
1397    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
1398
1399    // Return the new bottom.
1400    return sp8;
1401  }
1402
1403  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
1404
1405  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
1406  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
1407      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1408
1409 private:
1410  uint32_t num_handle_scope_references_;
1411};
1412
1413uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1414  num_handle_scope_references_++;
1415  return reinterpret_cast<uintptr_t>(nullptr);
1416}
1417
1418void ComputeGenericJniFrameSize::WalkHeader(
1419    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1420  // JNIEnv
1421  sm->AdvancePointer(nullptr);
1422
1423  // Class object or this as first argument
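  // The dummy pointer only drives counting in this size-computation pass; it is never dereferenced.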
1424  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1425}
1426
1427// Class to push values to three separate regions. Used to fill the native call part. Adheres to
1428// the template requirements of BuildNativeCallFrameStateMachine.
1429class FillNativeCall {
1430 public:
1431  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1432      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1433
1434  virtual ~FillNativeCall() {}
1435
1436  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1437    cur_gpr_reg_ = gpr_regs;
1438    cur_fpr_reg_ = fpr_regs;
1439    cur_stack_arg_ = stack_args;
1440  }
1441
1442  void PushGpr(uintptr_t val) {
1443    *cur_gpr_reg_ = val;
1444    cur_gpr_reg_++;
1445  }
1446
1447  void PushFpr4(float val) {
1448    *cur_fpr_reg_ = val;
1449    cur_fpr_reg_++;
1450  }
1451
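  // Note: the FPR area is addressed as uint32_t slots; a 64-bit value occupies two consecutive
  // slots.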
1452  void PushFpr8(uint64_t val) {
1453    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1454    *tmp = val;
1455    cur_fpr_reg_ += 2;
1456  }
1457
1458  void PushStack(uintptr_t val) {
1459    *cur_stack_arg_ = val;
1460    cur_stack_arg_++;
1461  }
1462
1463  virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1464    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1465    UNREACHABLE();
1466  }
1467
1468 private:
1469  uintptr_t* cur_gpr_reg_;
1470  uint32_t* cur_fpr_reg_;
1471  uintptr_t* cur_stack_arg_;
1472};
1473
1474// Visits arguments on the stack, placing them into a region lower down the stack, for the
1475// benefit of transitioning into native code.
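//
// The constructor first computes the full layout (ComputeGenericJniFrameSize::ComputeLayout), then
// points the FillJniCall delegate at the staging areas and feeds the JNIEnv* (plus the declaring
// class for static methods) to the state machine before VisitArguments() handles the
// shorty-derived arguments.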
1476class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1477 public:
1478  BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
1479                              StackReference<mirror::ArtMethod>** sp)
1480     : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
1481       jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
1482    ComputeGenericJniFrameSize fsc;
1483    uintptr_t* start_gpr_reg;
1484    uint32_t* start_fpr_reg;
1485    uintptr_t* start_stack_arg;
1486    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
1487                                             &handle_scope_,
1488                                             &start_stack_arg,
1489                                             &start_gpr_reg, &start_fpr_reg);
1490
1491    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
1492
1493    // The JNIEnv is always the first argument.
1494    sm_.AdvancePointer(self->GetJniEnv());
1495
1496    if (is_static) {
1497      sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
1498    }
1499  }
1500
1501  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
1502
1503  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1504
1505  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
1506      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1507    return handle_scope_->GetHandle(0).GetReference();
1508  }
1509
1510  jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1511    return handle_scope_->GetHandle(0).ToJObject();
1512  }
1513
1514  void* GetBottomOfUsedArea() const {
1515    return bottom_of_used_area_;
1516  }
1517
1518 private:
1519  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
1520  class FillJniCall FINAL : public FillNativeCall {
1521   public:
1522    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
1523                HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
1524                                             handle_scope_(handle_scope), cur_entry_(0) {}
1525
1526    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1527
1528    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
1529      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
1530      handle_scope_ = scope;
1531      cur_entry_ = 0U;
1532    }
1533
1534    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1535      // Initialize padding entries.
1536      size_t expected_slots = handle_scope_->NumberOfReferences();
1537      while (cur_entry_ < expected_slots) {
1538        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
1539      }
1540      DCHECK_NE(cur_entry_, 0U);
1541    }
1542
1543   private:
1544    HandleScope* handle_scope_;
1545    size_t cur_entry_;
1546  };
1547
1548  HandleScope* handle_scope_;
1549  FillJniCall jni_call_;
1550  void* bottom_of_used_area_;
1551
1552  BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1553
1554  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1555};
1556
1557uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
1558  uintptr_t tmp;
1559  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
1560  h.Assign(ref);
1561  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
1562  cur_entry_++;
1563  return tmp;
1564}
1565
1566void BuildGenericJniFrameVisitor::Visit() {
1567  Primitive::Type type = GetParamPrimitiveType();
1568  switch (type) {
1569    case Primitive::kPrimLong: {
1570      jlong long_arg;
1571      if (IsSplitLongOrDouble()) {
1572        long_arg = ReadSplitLongParam();
1573      } else {
1574        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1575      }
1576      sm_.AdvanceLong(long_arg);
1577      break;
1578    }
1579    case Primitive::kPrimDouble: {
1580      uint64_t double_arg;
1581      if (IsSplitLongOrDouble()) {
1582        // Read the raw bits so that we don't cast to a double.
1583        double_arg = ReadSplitLongParam();
1584      } else {
1585        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1586      }
1587      sm_.AdvanceDouble(double_arg);
1588      break;
1589    }
1590    case Primitive::kPrimNot: {
1591      StackReference<mirror::Object>* stack_ref =
1592          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1593      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1594      break;
1595    }
1596    case Primitive::kPrimFloat:
1597      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1598      break;
1599    case Primitive::kPrimBoolean:  // Fall-through.
1600    case Primitive::kPrimByte:     // Fall-through.
1601    case Primitive::kPrimChar:     // Fall-through.
1602    case Primitive::kPrimShort:    // Fall-through.
1603    case Primitive::kPrimInt:
1604      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1605      break;
1606    case Primitive::kPrimVoid:
1607      LOG(FATAL) << "UNREACHABLE";
1608      UNREACHABLE();
1609  }
1610}
1611
1612void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1613  // Clear out the rest of the scope.
1614  jni_call_.ResetRemainingScopeSlots();
1615  // Install HandleScope.
1616  self->PushHandleScope(handle_scope_);
1617}
1618
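// The generic JNI assembly stubs call artFindNativeMethod with per-architecture signatures: on
// ARM/ARM64 it takes no explicit Thread* (presumably it obtains the current thread itself).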
1619#if defined(__arm__) || defined(__aarch64__)
1620extern "C" void* artFindNativeMethod();
1621#else
1622extern "C" void* artFindNativeMethod(Thread* self);
1623#endif
1624
1625uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1626  if (lock != nullptr) {
1627    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1628  } else {
1629    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1630  }
1631}
1632
1633void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1634  if (lock != nullptr) {
1635    JniMethodEndSynchronized(cookie, lock, self);
1636  } else {
1637    JniMethodEnd(cookie, self);
1638  }
1639}
1640
1641/*
1642 * Initializes an alloca region assumed to be directly below sp for a native call:
1643 * creates a HandleScope and a native call stack, and fills staging areas with the values to be
1644 * loaded into registers before the call.
1645 *
1646 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1647 * We need to fix this up, as the handle scope has to go into the callee-save frame.
1648 *
1649 * The return of this function denotes:
1650 * 1) Success: the bottom of the used alloca area and the native code to invoke (as a TwoWordReturn).
1651 * 2) Error: a two-word failure value, with an exception pending on the current thread.
1652 */
1653extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
1654                                                      StackReference<mirror::ArtMethod>* sp)
1655    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1656  mirror::ArtMethod* called = sp->AsMirrorPtr();
1657  DCHECK(called->IsNative()) << PrettyMethod(called, true);
1658  uint32_t shorty_len = 0;
1659  const char* shorty = called->GetShorty(&shorty_len);
1660
1661  // Run the visitor and update sp.
1662  BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
1663  visitor.VisitArguments();
1664  visitor.FinalizeHandleScope(self);
1665
1666  // Fix up managed-stack things in Thread.
1667  self->SetTopOfStack(sp);
1668
1669  self->VerifyStack();
1670
1671  // Start JNI, save the cookie.
1672  uint32_t cookie;
1673  if (called->IsSynchronized()) {
1674    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
1675    if (self->IsExceptionPending()) {
1676      self->PopHandleScope();
1677      // A two-word failure value denotes an error.
1678      return GetTwoWordFailureValue();
1679    }
1680  } else {
1681    cookie = JniMethodStart(self);
1682  }
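  // Store the cookie in the 4-byte slot reserved by LayoutCookie(), directly below the relocated
  // method pointer; artQuickGenericJniEndTrampoline reads it back from the same location.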
1683  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1684  *(sp32 - 1) = cookie;
1685
1686  // Retrieve the stored native code.
1687  void* nativeCode = called->GetEntryPointFromJni();
1688
1689  // There are two cases for the content of nativeCode:
1690  // 1) Pointer to the native function.
1691  // 2) Pointer to the trampoline for native code binding.
1692  // In the second case, we need to execute the binding and continue with the actual native function
1693  // pointer.
1694  DCHECK(nativeCode != nullptr);
1695  if (nativeCode == GetJniDlsymLookupStub()) {
1696#if defined(__arm__) || defined(__aarch64__)
1697    nativeCode = artFindNativeMethod();
1698#else
1699    nativeCode = artFindNativeMethod(self);
1700#endif
1701
1702    if (nativeCode == nullptr) {
1703      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
1704
1705      // End JNI, as the assembly will move to deliver the exception.
1706      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
1707      if (shorty[0] == 'L') {
1708        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1709      } else {
1710        artQuickGenericJniEndJNINonRef(self, cookie, lock);
1711      }
1712
1713      return GetTwoWordFailureValue();
1714    }
1715    // Note that the native code pointer will be automatically set by artFindNativeMethod().
1716  }
1717
1718  // Return the native code address (lo) and the bottom of the alloca address (hi).
1719  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
1720                                reinterpret_cast<uintptr_t>(nativeCode));
1721}
1722
1723/*
1724 * Called after the native JNI code returns. Responsible for cleanup (handle scope, saved state)
1725 * and unlocking.
1726 */
1727extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
1728    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1729  StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame();
1730  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1731  mirror::ArtMethod* called = sp->AsMirrorPtr();
1732  uint32_t cookie = *(sp32 - 1);
1733
1734  jobject lock = nullptr;
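  // For synchronized methods the lock is the first handle-scope entry; the handle scope sits
  // directly above the method pointer, mirroring LayoutCalleeSaveFrame().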
1735  if (called->IsSynchronized()) {
1736    HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
1737        + sizeof(StackReference<mirror::ArtMethod>));
1738    lock = table->GetHandle(0).ToJObject();
1739  }
1740
1741  char return_shorty_char = called->GetShorty()[0];
1742
1743  if (return_shorty_char == 'L') {
1744    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
1745  } else {
1746    artQuickGenericJniEndJNINonRef(self, cookie, lock);
1747
1748    switch (return_shorty_char) {
1749      case 'F': {
1750        if (kRuntimeISA == kX86) {
1751          // On x86 the result was returned and stored as a double; convert it back to a float.
1752          double d = bit_cast<uint64_t, double>(result_f);
1753          return bit_cast<float, uint32_t>(static_cast<float>(d));
1754        } else {
1755          return result_f;
1756        }
1757      }
1758      case 'D':
1759        return result_f;
1760      case 'Z':
1761        return result.z;
1762      case 'B':
1763        return result.b;
1764      case 'C':
1765        return result.c;
1766      case 'S':
1767        return result.s;
1768      case 'I':
1769        return result.i;
1770      case 'J':
1771        return result.j;
1772      case 'V':
1773        return 0;
1774      default:
1775        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
1776        return 0;
1777    }
1778  }
1779}
1780
1781// We use TwoWordReturn to optimize scalar returns. We use the hi value for the code pointer, and
1782// the lo value for the method pointer.
1783//
1784// It is valid to use this, as at the usage points here (returns from C functions) we are assumed
1785// to hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
1786
1787template<InvokeType type, bool access_check>
1788static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1789                                     mirror::ArtMethod* caller_method,
1790                                     Thread* self, StackReference<mirror::ArtMethod>* sp);
1791
1792template<InvokeType type, bool access_check>
1793static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1794                                     mirror::ArtMethod* caller_method,
1795                                     Thread* self, StackReference<mirror::ArtMethod>* sp) {
1796  ScopedQuickEntrypointChecks sqec(self);
1797  DCHECK_EQ(sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
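  // Fast path: try to resolve the callee from the caller's dex cache. If that fails, fall back to
  // FindMethodFromCode below, protecting the argument references across a possible GC.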
1798  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
1799                                             type);
1800  if (UNLIKELY(method == nullptr)) {
1801    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1802    uint32_t shorty_len;
1803    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
1804    {
1805      // Remember the args in case a GC happens in FindMethodFromCode.
1806      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1807      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
1808      visitor.VisitArguments();
1809      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
1810                                                      self);
1811      visitor.FixupReferences();
1812    }
1813
1814    if (UNLIKELY(method == nullptr)) {
1815      CHECK(self->IsExceptionPending());
1816      return GetTwoWordFailureValue();  // Failure.
1817    }
1818  }
1819  DCHECK(!self->IsExceptionPending());
1820  const void* code = method->GetEntryPointFromQuickCompiledCode();
1821
1822  // When we return, the caller will branch to this address, so it had better not be 0!
1823  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
1824                          << " location: "
1825                          << method->GetDexFile()->GetLocation();
1826
1827  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1828                                reinterpret_cast<uintptr_t>(method));
1829}
1830
1831// Explicit artInvokeCommon template function declarations to please analysis tool.
1832#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
1833  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
1834  TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
1835                                                    mirror::Object* this_object,                \
1836                                                    mirror::ArtMethod* caller_method,           \
1837                                                    Thread* self,                               \
1838                                                    StackReference<mirror::ArtMethod>* sp)      \
1839
1840EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
1841EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
1842EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
1843EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
1844EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
1845EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
1846EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
1847EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
1848EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
1849EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
1850#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
1851
1852// See comments in runtime_support_asm.S
1853extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
1854    uint32_t method_idx, mirror::Object* this_object,
1855    mirror::ArtMethod* caller_method, Thread* self,
1856    StackReference<mirror::ArtMethod>* sp)
1857        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1858  return artInvokeCommon<kInterface, true>(method_idx, this_object,
1859                                           caller_method, self, sp);
1860}
1861
1862extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
1863    uint32_t method_idx, mirror::Object* this_object,
1864    mirror::ArtMethod* caller_method, Thread* self,
1865    StackReference<mirror::ArtMethod>* sp)
1866        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1867  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
1868                                        self, sp);
1869}
1870
1871extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
1872    uint32_t method_idx, mirror::Object* this_object,
1873    mirror::ArtMethod* caller_method, Thread* self,
1874    StackReference<mirror::ArtMethod>* sp)
1875        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1876  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
1877                                        self, sp);
1878}
1879
1880extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
1881    uint32_t method_idx, mirror::Object* this_object,
1882    mirror::ArtMethod* caller_method, Thread* self,
1883    StackReference<mirror::ArtMethod>* sp)
1884        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1885  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
1886                                       self, sp);
1887}
1888
1889extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
1890    uint32_t method_idx, mirror::Object* this_object,
1891    mirror::ArtMethod* caller_method, Thread* self,
1892    StackReference<mirror::ArtMethod>* sp)
1893        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1894  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
1895                                         self, sp);
1896}
1897
1898// Determine the target of an interface dispatch. this_object is known to be non-null.
1899extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
1900                                                      mirror::Object* this_object,
1901                                                      mirror::ArtMethod* caller_method,
1902                                                      Thread* self,
1903                                                      StackReference<mirror::ArtMethod>* sp)
1904    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1905  ScopedQuickEntrypointChecks sqec(self);
1906  mirror::ArtMethod* method;
1907  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
1908    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
1909    if (UNLIKELY(method == nullptr)) {
1910      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
1911                                                                 caller_method);
1912      return GetTwoWordFailureValue();  // Failure.
1913    }
1914  } else {
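    // The interface method was not resolved at the call site. Recover the method index by decoding
    // the invoke-interface instruction at the caller's dex PC, then resolve the target here.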
1915    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
1916
1917    // Find the caller PC.
1918    constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs);
1919    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
1920
1921    // Map the caller PC to a dex PC.
1922    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
1923    const DexFile::CodeItem* code = caller_method->GetCodeItem();
1924    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
1925    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
1926    Instruction::Code instr_code = instr->Opcode();
1927    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
1928          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
1929        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
1930    uint32_t dex_method_idx;
1931    if (instr_code == Instruction::INVOKE_INTERFACE) {
1932      dex_method_idx = instr->VRegB_35c();
1933    } else {
1934      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
1935      dex_method_idx = instr->VRegB_3rc();
1936    }
1937
1938    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
1939        ->GetDexFile();
1940    uint32_t shorty_len;
1941    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
1942                                                   &shorty_len);
1943    {
1944      // Remember the args in case a GC happens in FindMethodFromCode.
1945      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1946      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
1947      visitor.VisitArguments();
1948      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
1949                                                     self);
1950      visitor.FixupReferences();
1951    }
1952
1953    if (UNLIKELY(method == nullptr)) {
1954      CHECK(self->IsExceptionPending());
1955      return GetTwoWordFailureValue();  // Failure.
1956    }
1957  }
1958  const void* code = method->GetEntryPointFromQuickCompiledCode();
1959
1960  // When we return, the caller will branch to this address, so it had better not be 0!
1961  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
1962                          << " location: " << method->GetDexFile()->GetLocation();
1963
1964  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1965                                reinterpret_cast<uintptr_t>(method));
1966}
1967
1968}  // namespace art
1969