quick_trampoline_entrypoints.cc revision 167cc7c33f7100e3f7acc1594c066daa0122e27a
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "instruction_set.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // | :          |
  // | X19        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D15        |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
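  // Reading the AArch64 constants off the diagram: D0 (the first FPR arg) sits at
  // sp + 16, above the Method* slot and its padding; the first GPR arg X1 sits at
  // sp + 144 (= 16 + 16 D registers * 8 bytes); the saved LR sits at sp + 296.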
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
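  // Note that all three 32-bit targets so far (ARM, MIPS, x86) use a soft-float
  // quick ABI of the same shape: three argument GPRs and no argument FPRs, so
  // float and double arguments travel through core registers and the stack.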
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
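  // The switch above is a permutation: managed argument order is arg1 = RSI,
  // arg2 = RDX, arg3 = RCX, arg4 = R8, arg5 = R9, but the spill slots starting
  // at Gpr1Offset are laid out RCX, RDX, RBX, RBP, RSI, R8, R9 (see the diagram),
  // so argument indexes 0..4 map to slots 4, 1, 0, 5, 6.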
#else
#error "Unsupported architecture"
#endif

 public:
  static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {}

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }
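  // A split long/double has its low 32 bits in the last argument GPR and its
  // high 32 bits in the first out slot of the caller's frame, which is why
  // ReadSplitLongParam reassembles the value from GetParamAddress() and
  // stack_args_.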
  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // This implementation doesn't support reg-spill area for hard float
    // ABI targets such as x86_64 and aarch64. So, for those targets whose
    // 'kQuickSoftFloatAbi' is 'false':
    // (a) 'stack_args_' should point to the first method's argument
    // (b) whatever the argument type it is, the 'stack_index_' should
    //     be moved forward along with every visiting.
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle the "this" argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
        stack_index_++;
      }
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
            stack_index_++;
          }
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            }
            stack_index_++;
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else if (kQuickSoftFloatAbi) {
                  stack_index_++;
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                }
              }
            }
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }
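  // Note that the loop above starts at shorty index 1: shorty_[0] is the return
  // type, so the first real parameter is shorty_[1]. For instance methods the
  // implicit "this" reference is visited separately before the loop.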
 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
          + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
    } else {
      // For now, there is no reg-spill area for the targets with
      // hard float ABI. So, the offset pointing to the first method's
      // parameter ('this' for non-static methods) should be returned.
      return sizeof(StackReference<mirror::ArtMethod>);  // Skip StackReference<ArtMethod>.
    }
  }

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  uint32_t fpr_index_;  // Index into spilled FPRs.
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                               const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
                               size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}
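// Note: longs and doubles occupy two vregs in a shadow frame, which is why the
// wide case above bumps cur_reg_ once inside the switch and once more at the
// end, while all narrow types advance by a single vreg.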
extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    // No last shadow coming from quick.
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    uint32_t shorty_len = 0;
    const char* shorty = method->GetShorty(&shorty_len);
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    StackHandleScope<1> hs(self);
    MethodHelper mh(hs.NewHandle(method));
    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}
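// The shadow frame above lives on the native stack via alloca, so it is valid
// only for the duration of this call: EnterInterpreterFromStub runs the method
// to completion before the bridge returns the packed 64-bit result.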
// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                            const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}
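// Because a moving garbage collector may run while the invocation handler
// executes, each reference argument is recorded above as a (local ref, stack
// slot) pair; FixupReferences then writes the possibly-moved objects back into
// the quick frame and releases the local references.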
// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ(sp->AsMirrorPtr(), proxy_method)
      << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
                                       << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                               const char* shorty, uint32_t shorty_len,
                               ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}
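// RememberForGcArgumentVisitor is the reference-only counterpart of
// BuildQuickArgumentVisitor: it captures just the object arguments (no jvalue
// vector) so the resolution trampoline below can survive a moving GC, using
// the same fix-up-on-exit scheme.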
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                    mirror::Object* receiver,
                                                    Thread* self,
                                                    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
  // Start new JNI local reference state
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs)
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  const DexFile* dex_file;
  uint32_t dex_method_idx;
  if (called->IsRuntimeMethod()) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    dex_file = caller->GetDexFile();
    code = caller->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
        // Avoid "may be used uninitialized" warnings.
        invoke_type = kDirect;
        is_range = false;
    }
    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
  } else {
    invoke_type = kStatic;
    dex_file = called->GetDexFile();
    dex_method_idx = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (UNLIKELY(called->IsRuntimeMethod())) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type);
  }
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      mirror::ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
                               << PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK_EQ(caller->GetDexFile(), dex_file);
        StackHandleScope<1> hs(self);
        MethodHelper mh(hs.NewHandle(called));
        uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  sp->Assign(called);
  return code;
}
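// When the called method is the runtime resolution stub, the only way to
// recover the real target is to decode the invoke instruction sitting at the
// caller's return PC, as done above; once resolved, the caller's dex cache is
// patched so subsequent calls skip this trampoline.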
/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things
 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
 *    and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
 *                       padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have
 *                                            nullptr, as this might be important for null
 *                                            initialization. Must return the jobject, that is, the
 *                                            reference to the entry in the HandleScope (nullptr if
 *                                            necessary).
 *
 */
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
    // the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }
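  // Note the pattern shared by the Advance* methods: once a value spills to the
  // stack, gpr_index_ is forced to zero so no later argument can take a
  // register, matching the ABI rule that register and stack slots are assigned
  // strictly in argument order.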
  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs alignment
           (gpr_index_ & 1) == 1;          // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;      // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(bit_cast<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(bit_cast<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
        } else {
          PushStack(bit_cast<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }
  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs alignment
           (fpr_index_ & 1) == 1;            // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;        // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs
  uint32_t fpr_index_;      // Number of free FPRs
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended
  T* delegate_;             // What Push implementation gets called
};
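// The state machine above only decides where each value goes; the actual
// stores are delegated to T. It is driven twice per call: first with
// ComputeNativeCallFrameSize (below) to size the frame, then with
// FillNativeCall to actually populate the registers and stack.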
// Computes the sizes of register stacks and call stack area. Handling of references can be
// extended in subclasses.
//
// To handle native pointers, use "L" in the shorty for an object reference, which simulates
// them with handles.
class ComputeNativeCallFrameSize {
 public:
  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}

  virtual ~ComputeNativeCallFrameSize() {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  uint8_t* LayoutCallStack(uint8_t* sp8) {
    sp8 -= GetStackSize();
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) {
    // Assumption is OK right now, as we have soft-float arm
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put fprs and gprs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}

  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr4(float /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr8(uint64_t /* val */) {
    // not optimizing registers, yet
  }

  void PushStack(uintptr_t /* val */) {
    // counting is already done in the superclass
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};
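// Walk above only needs to count: the dummy values it feeds the state machine
// (0 for primitives, the placeholder pointer 0x12345678 for references) are
// never stored or dereferenced, since all the Push* callbacks here are no-ops.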
class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
 public:
  ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}

  // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
  // is at *m = sp. Will update to point to the bottom of the save frame.
  //
  // Note: assumes ComputeAll() has been run before.
  void LayoutCalleeSaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
                             uint32_t* handle_scope_entries)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = (*m)->AsMirrorPtr();

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.

    // Under the callee saves put handle scope and new method stack reference.
    *handle_scope_entries = num_handle_scope_references_;

    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
    size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);

    sp8 -= scope_and_method;
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(
        reinterpret_cast<uintptr_t>(sp8), kStackAlignment));

    uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
    *table = reinterpret_cast<HandleScope*>(sp8_table);
    (*table)->SetNumberOfReferences(num_handle_scope_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    uint8_t* method_pointer = sp8;
    StackReference<mirror::ArtMethod>* new_method_ref =
        reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
    new_method_ref->Assign(method);
    *m = new_method_ref;
  }

  // Adds space for the cookie. Note: may leave stack unaligned.
  void LayoutCookie(uint8_t** sp) {
    // Reference cookie and padding
    *sp -= 8;
  }

  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
  // Returns the new bottom. Note: this may be unaligned.
  uint8_t* LayoutJNISaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
                              uint32_t* handle_scope_entries)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.
    LayoutCalleeSaveFrame(m, sp, table, handle_scope_entries);

    // The bottom of the callee-save frame is now where the method is, *m.
    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);

    // Add space for cookie.
    LayoutCookie(&sp8);

    return sp8;
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  uint8_t* ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
                         uint32_t shorty_len, HandleScope** table, uint32_t* handle_scope_entries,
                         uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Walk(shorty, shorty_len);

    // JNI part.
    uint8_t* sp8 = LayoutJNISaveFrame(m, reinterpret_cast<void*>(*m), table, handle_scope_entries);

    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }
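  // After ComputeLayout the used area looks like this, top to bottom:
  //   callee saves (unchanged)
  //   HandleScope (header plus handle_scope_entries references)
  //   StackReference<ArtMethod>  <- the relocated method pointer, *m
  //   JNI reference cookie (+ padding)
  //   native stack arguments
  //   FPR array, then GPR array  <- the returned bottom of the used area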
  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;

  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  uint32_t num_handle_scope_references_;
};

uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
  num_handle_scope_references_++;
  return reinterpret_cast<uintptr_t>(nullptr);
}

void ComputeGenericJniFrameSize::WalkHeader(
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
  // JNIEnv
  sm->AdvancePointer(nullptr);

  // Class object or this as first argument
  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
}

// Class to push values to three separate regions. Used to fill the native call part. Adheres to
// the template requirements of BuildNativeCallFrameStateMachine.
class FillNativeCall {
 public:
  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}

  virtual ~FillNativeCall() {}

  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
    cur_gpr_reg_ = gpr_regs;
    cur_fpr_reg_ = fpr_regs;
    cur_stack_arg_ = stack_args;
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    *cur_fpr_reg_ = val;
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
    return 0U;
  }

 private:
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
};
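// Note that cur_fpr_reg_ is a uint32_t*, so PushFpr8 above advances the cursor
// by two 32-bit slots after storing a 64-bit value, while PushFpr4 fills a
// single slot.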
// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
                              const char* shorty, uint32_t shorty_len, Thread* self)
      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
        jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
    ComputeGenericJniFrameSize fsc;
    uintptr_t* start_gpr_reg;
    uint32_t* start_fpr_reg;
    uintptr_t* start_stack_arg;
    uint32_t handle_scope_entries;
    bottom_of_used_area_ = fsc.ComputeLayout(sp, is_static, shorty, shorty_len, &handle_scope_,
                                             &handle_scope_entries, &start_stack_arg,
                                             &start_gpr_reg, &start_fpr_reg);

    handle_scope_->SetNumberOfReferences(handle_scope_entries);
    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);

    // jni environment is always first argument
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
    }
  }

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).GetReference();
  }

  jobject GetFirstHandleScopeJObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).ToJObject();
  }

  void* GetBottomOfUsedArea() {
    return bottom_of_used_area_;
  }

 private:
  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
  class FillJniCall FINAL : public FillNativeCall {
   public:
    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
                                             handle_scope_(handle_scope), cur_entry_(0) {}

    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
      handle_scope_ = scope;
      cur_entry_ = 0U;
    }
    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // Initialize padding entries.
      size_t expected_slots = handle_scope_->NumberOfReferences();
      while (cur_entry_ < expected_slots) {
        handle_scope_->GetHandle(cur_entry_++).Assign(nullptr);
      }
      DCHECK_NE(cur_entry_, 0U);
    }

   private:
    HandleScope* handle_scope_;
    size_t cur_entry_;
  };

  HandleScope* handle_scope_;
  FillJniCall jni_call_;
  void* bottom_of_used_area_;

  BuildNativeCallFrameStateMachine<FillJniCall> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};

uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
  uintptr_t tmp;
  Handle<mirror::Object> h = handle_scope_->GetHandle(cur_entry_);
  h.Assign(ref);
  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
  cur_entry_++;
  return tmp;
}

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read the value as a 64-bit integer so that we don't accidentally convert the bits
        // to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
}

void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
  // Clear out the rest of the scope.
  jni_call_.ResetRemainingScopeSlots();
  // Install the HandleScope on the thread so the GC can see the references.
  self->PushHandleScope(handle_scope_);
}

#if defined(__arm__) || defined(__aarch64__)
extern "C" void* artFindNativeMethod();
#else
extern "C" void* artFindNativeMethod(Thread* self);
#endif

uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
  } else {
    JniMethodEnd(cookie, self);
  }
}
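// A note on shorties: a shorty describes a signature with one character per type, return type
// first and 'L' standing for any reference type. A (hypothetical) method
//   double f(Object o, long l)
// has shorty "DLJ". Visit() above dispatches on the parameter characters, and the code below
// tests shorty[0] == 'L' to decide whether the JNI call returns a reference.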
/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Create a HandleScope and a call stack, and fill a mini stack with values to be pushed
 * to registers. The final element on the stack is a pointer to the native code.
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this, as the handle scope needs to go into the callee-save frame.
 *
 * The return value reports either failure (the two-word failure value, with an exception
 * pending) or success (the bottom of the used alloca area and the native code to invoke,
 * see the final return below).
 */
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
                                                      StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* called = sp->AsMirrorPtr();
  DCHECK(called->IsNative()) << PrettyMethod(called, true);
  uint32_t shorty_len = 0;
  const char* shorty = called->GetShorty(&shorty_len);

  // Run the visitor.
  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self);
  visitor.VisitArguments();
  visitor.FinalizeHandleScope(self);

  // Fix up managed-stack things in Thread.
  self->SetTopOfStack(sp, 0);

  self->VerifyStack();

  // Start JNI, save the cookie.
  uint32_t cookie;
  if (called->IsSynchronized()) {
    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
    if (self->IsExceptionPending()) {
      self->PopHandleScope();
      // Report the error to the caller with the failure value.
      return GetTwoWordFailureValue();
    }
  } else {
    cookie = JniMethodStart(self);
  }
  // Save the cookie in the slot directly below the managed frame; the end trampoline reads it
  // back from there.
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  *(sp32 - 1) = cookie;

  // Retrieve the stored native code.
  const void* nativeCode = called->GetNativeMethod();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native
  // function pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
#if defined(__arm__) || defined(__aarch64__)
    nativeCode = artFindNativeMethod();
#else
    nativeCode = artFindNativeMethod(self);
#endif

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.

      // End JNI, as the assembly will move to deliver the exception.
      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
      if (shorty[0] == 'L') {
        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
      } else {
        artQuickGenericJniEndJNINonRef(self, cookie, lock);
      }

      return GetTwoWordFailureValue();
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

  // Return the native code addr(lo) and the bottom of the alloca address(hi).
  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
                                reinterpret_cast<uintptr_t>(nativeCode));
}
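// A sketch of the two-word packing used above (the real typedefs live elsewhere in the
// runtime; on a 32-bit ISA, TwoWordReturn can be modeled as a 64-bit value returned in a
// register pair):
//
//   typedef uint64_t TwoWordReturn;
//   static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
//     return (static_cast<uint64_t>(hi) << 32) | static_cast<uint32_t>(lo);
//   }
//
// The generic JNI stub thus receives the bottom of the used alloca area and the native code
// pointer in two registers; it releases the unused part of the alloca and branches to the
// native code.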
/*
 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
 * unlocking.
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame();
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  mirror::ArtMethod* called = sp->AsMirrorPtr();
  uint32_t cookie = *(sp32 - 1);

  jobject lock = nullptr;
  if (called->IsSynchronized()) {
    // The lock object is in the first handle scope slot, directly above the method in the frame.
    HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
                                                        + sizeof(StackReference<mirror::ArtMethod>));
    lock = table->GetHandle(0).ToJObject();
  }

  char return_shorty_char = called->GetShorty()[0];

  if (return_shorty_char == 'L') {
    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
  } else {
    artQuickGenericJniEndJNINonRef(self, cookie, lock);

    switch (return_shorty_char) {
      case 'F':  // Fall-through.
      case 'D':
        return result_f;
      case 'Z':
        return result.z;
      case 'B':
        return result.b;
      case 'C':
        return result.c;
      case 'S':
        return result.s;
      case 'I':
        return result.i;
      case 'J':
        return result.j;
      case 'V':
        return 0;
      default:
        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
        return 0;
    }
  }
}

// We use TwoWordReturn to optimize scalar returns. We use the hi value for the code pointer and
// the lo value for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we assume that
// we hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).

template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                     mirror::ArtMethod* caller_method,
                                     Thread* self, StackReference<mirror::ArtMethod>* sp) {
  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
                                             type);
  if (UNLIKELY(method == nullptr)) {
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
                                                      self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  DCHECK(!self->IsExceptionPending());
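  // Note on the slow path above: FindMethodFromCode can allocate and may therefore trigger a
  // GC while the caller's arguments are only spilled as raw words in the RefsAndArgs frame.
  // RememberForGcArgumentVisitor records the reference arguments, and FixupReferences() writes
  // back any pointers the GC moved, before we hand the resolved method's code to the stub.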
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
                          << " location: "
                          << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

// Explicit artInvokeCommon template function declarations to please the analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                          \
  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                   \
  TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                 \
                                                    mirror::Object* this_object,         \
                                                    mirror::ArtMethod* caller_method,    \
                                                    Thread* self,                        \
                                                    StackReference<mirror::ArtMethod>* sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

// See comments in runtime_support_asm.S.
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object,
                                           caller_method, self, sp);
}

extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
                                        self, sp);
}

extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
                                        self, sp);
}

extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
                                       self, sp);
}

extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
                                         self, sp);
}

// Determine the target of an interface dispatch. This object is known to be non-null.
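// Illustration (hypothetical call site, not from this file): for a dex instruction such as
//   invoke-interface {v2}, Ljava/lang/Runnable;->run()V
// the target depends on the dynamic type of the receiver in v2, so the fast path below asks
// the receiver's class for the concrete implementation (FindVirtualMethodForInterface). Only
// on the unresolved path must the method index be recovered, by decoding VRegB of the
// invoke-interface (format 35c) or invoke-interface/range (format 3rc) instruction found at
// the caller's dex PC.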
1838extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method, 1839 mirror::Object* this_object, 1840 mirror::ArtMethod* caller_method, 1841 Thread* self, 1842 StackReference<mirror::ArtMethod>* sp) 1843 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1844 mirror::ArtMethod* method; 1845 if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) { 1846 method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method); 1847 if (UNLIKELY(method == NULL)) { 1848 FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); 1849 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object, 1850 caller_method); 1851 return GetTwoWordFailureValue(); // Failure. 1852 } 1853 } else { 1854 FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); 1855 DCHECK(interface_method == Runtime::Current()->GetResolutionMethod()); 1856 1857 // Find the caller PC. 1858 constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs); 1859 uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset); 1860 1861 // Map the caller PC to a dex PC. 1862 uint32_t dex_pc = caller_method->ToDexPc(caller_pc); 1863 const DexFile::CodeItem* code = caller_method->GetCodeItem(); 1864 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 1865 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); 1866 Instruction::Code instr_code = instr->Opcode(); 1867 CHECK(instr_code == Instruction::INVOKE_INTERFACE || 1868 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 1869 << "Unexpected call into interface trampoline: " << instr->DumpString(NULL); 1870 uint32_t dex_method_idx; 1871 if (instr_code == Instruction::INVOKE_INTERFACE) { 1872 dex_method_idx = instr->VRegB_35c(); 1873 } else { 1874 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 1875 dex_method_idx = instr->VRegB_3rc(); 1876 } 1877 1878 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache() 1879 ->GetDexFile(); 1880 uint32_t shorty_len; 1881 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), 1882 &shorty_len); 1883 { 1884 // Remember the args in case a GC happens in FindMethodFromCode. 1885 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 1886 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 1887 visitor.VisitArguments(); 1888 method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method, 1889 self); 1890 visitor.FixupReferences(); 1891 } 1892 1893 if (UNLIKELY(method == nullptr)) { 1894 CHECK(self->IsExceptionPending()); 1895 return GetTwoWordFailureValue(); // Failure. 1896 } 1897 } 1898 const void* code = method->GetEntryPointFromQuickCompiledCode(); 1899 1900 // When we return, the caller will branch to this address, so it had better not be 0! 1901 DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) 1902 << " location: " << method->GetDexFile()->GetLocation(); 1903 1904 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 1905 reinterpret_cast<uintptr_t>(method)); 1906} 1907 1908} // namespace art 1909