quick_trampoline_entrypoints.cc revision c200a4abeca91e19969f5b35543f17f812ba32b9
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "instruction_set.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // | :          |
  // | X19        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D15        |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    // Note: the GPR spill slots are not in argument order; RBX and RBP (slot indices 2 and 3)
    // are callee saves sitting between the argument registers (see the diagram above), hence
    // the explicit mapping.
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }
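
  // Worked example (illustrative, using the ARM layout above): the caller's return
  // address was spilled at sp + 44 (kQuickCalleeSaveFrame_RefAndArgs_LrOffset), and
  // since LR is the topmost slot of the save frame, the caller's
  // StackReference<ArtMethod> sits at sp + 48 (the 44 + 4 byte frame size).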

  QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {}

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }
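
  // Worked example (illustrative): when a 64-bit argument is split, its low word sits
  // in the last argument GPR and its high word is the first out-of-register stack slot.
  // With low_half == 0x89abcdef and high_half == 0x01234567, ReadSplitLongParam()
  // reassembles (0x89abcdefULL & 0xffffffffULL) | (0x01234567ULL << 32)
  // == 0x0123456789abcdefULL.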

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // This implementation doesn't support a reg-spill area for hard float
    // ABI targets such as x86_64 and aarch64. So, for those targets whose
    // 'kQuickSoftFloatAbi' is 'false':
    // (a) 'stack_args_' should point to the first method argument
    // (b) whatever the argument type is, 'stack_index_' should be moved
    //     forward on every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle this.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
        stack_index_++;
      }
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
            stack_index_++;
          }
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            }
            stack_index_++;
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else if (kQuickSoftFloatAbi) {
                  stack_index_++;
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                }
              }
            }
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
          + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
    } else {
      // For now, there is no reg-spill area for the targets with
      // hard float ABI. So, the offset pointing to the first method
      // parameter ('this' for non-static methods) should be returned.
      return sizeof(StackReference<mirror::ArtMethod>);  // Skip StackReference<ArtMethod>.
    }
  }

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  uint32_t fpr_index_;  // Index into spilled FPRs.
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};
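
// Illustrative sketch of the visitor contract (hypothetical class, not used in this
// file): Visit() is invoked once per parameter ('this' first for non-static methods),
// with the parameter's type and spill address available through
// GetParamPrimitiveType() and GetParamAddress().
class CountReferenceArgsVisitor FINAL : public QuickArgumentVisitor {
 public:
  CountReferenceArgsVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                            const char* shorty, uint32_t shorty_len)
      : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), num_refs_(0) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
    if (IsParamAReference()) {
      // GetParamAddress() would yield the StackReference<mirror::Object>* here.
      num_refs_++;
    }
  }

  size_t NumRefs() const { return num_refs_; }

 private:
  size_t num_refs_;

  DISALLOW_COPY_AND_ASSIGN(CountReferenceArgsVisitor);
};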

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                               const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
                               size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;  // Longs and doubles take two vregs; the second increment follows below.
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    // No last shadow coming from quick.
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    uint32_t shorty_len = 0;
    const char* shorty = method->GetShorty(&shorty_len);
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    StackHandleScope<1> hs(self);
    MethodHelper mh(hs.NewHandle(method));
    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}

// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                            const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
                                       << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them into JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                               const char* shorty, uint32_t shorty_len,
                               ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Lazily resolve a method for quick. Called by stub code.
672extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, 673 mirror::Object* receiver, 674 Thread* self, 675 StackReference<mirror::ArtMethod>* sp) 676 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 677 FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); 678 // Start new JNI local reference state 679 JNIEnvExt* env = self->GetJniEnv(); 680 ScopedObjectAccessUnchecked soa(env); 681 ScopedJniEnvLocalRefState env_state(env); 682 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 683 684 // Compute details about the called method (avoid GCs) 685 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 686 mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 687 InvokeType invoke_type; 688 const DexFile* dex_file; 689 uint32_t dex_method_idx; 690 if (called->IsRuntimeMethod()) { 691 uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp)); 692 const DexFile::CodeItem* code; 693 dex_file = caller->GetDexFile(); 694 code = caller->GetCodeItem(); 695 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 696 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); 697 Instruction::Code instr_code = instr->Opcode(); 698 bool is_range; 699 switch (instr_code) { 700 case Instruction::INVOKE_DIRECT: 701 invoke_type = kDirect; 702 is_range = false; 703 break; 704 case Instruction::INVOKE_DIRECT_RANGE: 705 invoke_type = kDirect; 706 is_range = true; 707 break; 708 case Instruction::INVOKE_STATIC: 709 invoke_type = kStatic; 710 is_range = false; 711 break; 712 case Instruction::INVOKE_STATIC_RANGE: 713 invoke_type = kStatic; 714 is_range = true; 715 break; 716 case Instruction::INVOKE_SUPER: 717 invoke_type = kSuper; 718 is_range = false; 719 break; 720 case Instruction::INVOKE_SUPER_RANGE: 721 invoke_type = kSuper; 722 is_range = true; 723 break; 724 case Instruction::INVOKE_VIRTUAL: 725 invoke_type = kVirtual; 726 is_range = false; 727 break; 728 case Instruction::INVOKE_VIRTUAL_RANGE: 729 invoke_type = kVirtual; 730 is_range = true; 731 break; 732 case Instruction::INVOKE_INTERFACE: 733 invoke_type = kInterface; 734 is_range = false; 735 break; 736 case Instruction::INVOKE_INTERFACE_RANGE: 737 invoke_type = kInterface; 738 is_range = true; 739 break; 740 default: 741 LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); 742 // Avoid used uninitialized warnings. 743 invoke_type = kDirect; 744 is_range = false; 745 } 746 dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); 747 } else { 748 invoke_type = kStatic; 749 dex_file = called->GetDexFile(); 750 dex_method_idx = called->GetDexMethodIndex(); 751 } 752 uint32_t shorty_len; 753 const char* shorty = 754 dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len); 755 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 756 visitor.VisitArguments(); 757 self->EndAssertNoThreadSuspension(old_cause); 758 bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 759 // Resolve method filling in dex cache. 760 if (UNLIKELY(called->IsRuntimeMethod())) { 761 StackHandleScope<1> hs(self); 762 mirror::Object* dummy = nullptr; 763 HandleWrapper<mirror::Object> h_receiver( 764 hs.NewHandleWrapper(virtual_or_interface ? 
&receiver : &dummy)); 765 called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type); 766 } 767 const void* code = NULL; 768 if (LIKELY(!self->IsExceptionPending())) { 769 // Incompatible class change should have been handled in resolve method. 770 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 771 << PrettyMethod(called) << " " << invoke_type; 772 if (virtual_or_interface) { 773 // Refine called method based on receiver. 774 CHECK(receiver != nullptr) << invoke_type; 775 776 mirror::ArtMethod* orig_called = called; 777 if (invoke_type == kVirtual) { 778 called = receiver->GetClass()->FindVirtualMethodForVirtual(called); 779 } else { 780 called = receiver->GetClass()->FindVirtualMethodForInterface(called); 781 } 782 783 CHECK(called != nullptr) << PrettyMethod(orig_called) << " " 784 << PrettyTypeOf(receiver) << " " 785 << invoke_type << " " << orig_called->GetVtableIndex(); 786 787 // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index 788 // of the sharpened method. 789 if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) { 790 caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called); 791 } else { 792 // Calling from one dex file to another, need to compute the method index appropriate to 793 // the caller's dex file. Since we get here only if the original called was a runtime 794 // method, we've got the correct dex_file and a dex_method_idx from above. 795 DCHECK_EQ(caller->GetDexFile(), dex_file); 796 StackHandleScope<1> hs(self); 797 MethodHelper mh(hs.NewHandle(called)); 798 uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx); 799 if (method_index != DexFile::kDexNoIndex) { 800 caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called); 801 } 802 } 803 } 804 // Ensure that the called method's class is initialized. 805 StackHandleScope<1> hs(soa.Self()); 806 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 807 linker->EnsureInitialized(called_class, true, true); 808 if (LIKELY(called_class->IsInitialized())) { 809 code = called->GetEntryPointFromQuickCompiledCode(); 810 } else if (called_class->IsInitializing()) { 811 if (invoke_type == kStatic) { 812 // Class is still initializing, go to oat and grab code (trampoline must be left in place 813 // until class is initialized to stop races between threads). 814 code = linker->GetQuickOatCodeFor(called); 815 } else { 816 // No trampoline for non-static methods. 817 code = called->GetEntryPointFromQuickCompiledCode(); 818 } 819 } else { 820 DCHECK(called_class->IsErroneous()); 821 } 822 } 823 CHECK_EQ(code == NULL, self->IsExceptionPending()); 824 // Fixup any locally saved objects may have moved during a GC. 825 visitor.FixupReferences(); 826 // Place called method in callee-save frame to be placed as first argument to quick method. 827 sp->Assign(called); 828 return code; 829} 830 831/* 832 * This class uses a couple of observations to unite the different calling conventions through 833 * a few constants. 834 * 835 * 1) Number of registers used for passing is normally even, so counting down has no penalty for 836 * possible alignment. 837 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point 838 * types, so using uintptr_t is OK. 
 * 3) The only soft-float target, ARM, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
 *                       padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have
 *                                            nullptr, as this might be important for null
 *                                            initialization. Must return the jobject, that is, the
 *                                            reference to the entry in the HandleScope (nullptr if
 *                                            necessary).
 *
 */
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs (dummy).
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs (dummy).

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs (all on the stack).
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs (all on the stack).

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }

  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs alignment
           (gpr_index_ & 1) == 1;          // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;      // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }
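
  // Worked example (illustrative, using the 32b ARM values above: 4 argument GPRs,
  // kAlignLongOnStack == true): after one AdvanceInt(), gpr_index_ is 3 (odd), so
  // LongGprNeedsPadding() holds and HaveLongGpr() requires 2 + 1 = 3 free registers.
  // AdvanceLong() then pushes one padding GPR followed by the two 32-bit halves,
  // leaving gpr_index_ == 0.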

  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(bit_cast<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(bit_cast<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
        } else {
          PushStack(bit_cast<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }

  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs alignment
           (fpr_index_ & 1) == 1;            // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;        // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* delegate_;             // What Push implementation gets called.
};
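
// Illustrative sketch of the delegate interface documented above (hypothetical class,
// not used anywhere in this file): a delegate that merely counts what would be pushed.
class CountingPushDelegate {
 public:
  CountingPushDelegate() : gprs_(0), fprs_(0), stack_words_(0), handles_(0) {}

  void PushGpr(uintptr_t /* val */) { gprs_++; }
  void PushFpr4(float /* val */) { fprs_++; }
  void PushFpr8(uint64_t /* val */) { fprs_++; }
  void PushStack(uintptr_t /* val */) { stack_words_++; }
  uintptr_t PushHandle(mirror::Object* /* ref */) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    handles_++;
    return reinterpret_cast<uintptr_t>(nullptr);  // Value the state machine will then push.
  }

  uint32_t gprs_, fprs_, stack_words_, handles_;
};
// Hypothetical usage:
//   CountingPushDelegate counter;
//   BuildNativeCallFrameStateMachine<CountingPushDelegate> sm(&counter);
//   sm.AdvancePointer(nullptr);  // Consumes a GPR, or a stack word once GPRs run out.
//   sm.AdvanceDouble(0);         // Consumes FPR(s) on hard-float targets.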

// Computes the sizes of register stacks and call stack area. Handling of references can be
// extended in subclasses.
//
// To handle native pointers, use "L" in the shorty for an object reference, which simulates
// them with handles.
class ComputeNativeCallFrameSize {
 public:
  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}

  virtual ~ComputeNativeCallFrameSize() {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  uint8_t* LayoutCallStack(uint8_t* sp8) {
    sp8 -= GetStackSize();
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) {
    // Assumption is OK right now, as we have soft-float arm.
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put fprs and gprs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}

  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr4(float /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr8(uint64_t /* val */) {
    // not optimizing registers, yet
  }

  void PushStack(uintptr_t /* val */) {
    // counting is already done in the superclass
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};
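
// Illustrative sketch (hypothetical helper, not called anywhere): computing the
// out-of-register stack size for a native call with shorty "ILD" (int return, one
// reference argument, one double argument). Walk() skips shorty[0], the return type.
static size_t ExampleNativeStackSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ComputeNativeCallFrameSize fsc;
  fsc.Walk("ILD", 3);
  return fsc.GetStackSize();  // num_stack_entries_ * sizeof(uintptr_t).
}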

class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
 public:
  ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}

  // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
  // is at *m = sp. Will update to point to the bottom of the save frame.
  //
  // Note: assumes ComputeAll() has been run before.
  void LayoutCalleeSaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
                             uint32_t* handle_scope_entries)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = (*m)->AsMirrorPtr();

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.

    // Under the callee saves put handle scope and new method stack reference.
    *handle_scope_entries = num_handle_scope_references_;

    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
    size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);

    sp8 -= scope_and_method;
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));

    uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
    *table = reinterpret_cast<HandleScope*>(sp8_table);
    (*table)->SetNumberOfReferences(num_handle_scope_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    uint8_t* method_pointer = sp8;
    StackReference<mirror::ArtMethod>* new_method_ref =
        reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
    new_method_ref->Assign(method);
    *m = new_method_ref;
  }

  // Adds space for the cookie. Note: may leave stack unaligned.
  void LayoutCookie(uint8_t** sp) {
    // Reference cookie and padding.
    *sp -= 8;
  }

  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
  // Returns the new bottom. Note: this may be unaligned.
  uint8_t* LayoutJNISaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
                              uint32_t* handle_scope_entries)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.
    LayoutCalleeSaveFrame(m, sp, table, handle_scope_entries);

    // The bottom of the callee-save frame is now where the method is, *m.
    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);

    // Add space for cookie.
    LayoutCookie(&sp8);

    return sp8;
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  uint8_t* ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
                         uint32_t shorty_len, HandleScope** table, uint32_t* handle_scope_entries,
                         uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Walk(shorty, shorty_len);

    // JNI part.
    uint8_t* sp8 = LayoutJNISaveFrame(m, reinterpret_cast<void*>(*m), table, handle_scope_entries);

    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;

  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  uint32_t num_handle_scope_references_;
};

uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
  num_handle_scope_references_++;
  return reinterpret_cast<uintptr_t>(nullptr);
}

void ComputeGenericJniFrameSize::WalkHeader(
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
  // JNIEnv.
  sm->AdvancePointer(nullptr);

  // Class object or this as first argument.
  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
}

// Class to push values to three separate regions. Used to fill the native call part. Adheres to
// the template requirements of BuildNativeCallFrameStateMachine.
class FillNativeCall {
 public:
  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}

  virtual ~FillNativeCall() {}

  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
    cur_gpr_reg_ = gpr_regs;
    cur_fpr_reg_ = fpr_regs;
    cur_stack_arg_ = stack_args;
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    // Store the raw bits; a plain assignment would perform a numeric float->uint32_t conversion.
    *cur_fpr_reg_ = bit_cast<float, uint32_t>(val);
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
    return 0U;
  }

 private:
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
};
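
// Illustrative sketch (hypothetical buffers and usage): FillNativeCall writes each
// pushed value into one of the three regions laid out by ComputeNativeCallFrameSize.
//   uintptr_t gprs[8]; uint32_t fprs[16]; uintptr_t stack[16];  // Sized for the target.
//   FillNativeCall filler(gprs, fprs, stack);
//   BuildNativeCallFrameStateMachine<FillNativeCall> sm(&filler);
//   sm.AdvancePointer(env);  // Lands in gprs[0], or on the stack once GPRs run out.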

// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
                              const char* shorty, uint32_t shorty_len, Thread* self)
      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
        jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
    ComputeGenericJniFrameSize fsc;
    uintptr_t* start_gpr_reg;
    uint32_t* start_fpr_reg;
    uintptr_t* start_stack_arg;
    uint32_t handle_scope_entries;
    bottom_of_used_area_ = fsc.ComputeLayout(sp, is_static, shorty, shorty_len, &handle_scope_,
                                             &handle_scope_entries, &start_stack_arg,
                                             &start_gpr_reg, &start_fpr_reg);

    handle_scope_->SetNumberOfReferences(handle_scope_entries);
    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);

    // The JNI environment is always the first argument.
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
    }
  }

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).GetReference();
  }

  jobject GetFirstHandleScopeJObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).ToJObject();
  }

  void* GetBottomOfUsedArea() {
    return bottom_of_used_area_;
  }

 private:
  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
  class FillJniCall FINAL : public FillNativeCall {
   public:
    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
                                             handle_scope_(handle_scope), cur_entry_(0) {}

    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
      handle_scope_ = scope;
      cur_entry_ = 0U;
    }

    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // Initialize padding entries.
uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
  uintptr_t tmp;
  Handle<mirror::Object> h = handle_scope_->GetHandle(cur_entry_);
  h.Assign(ref);
  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
  cur_entry_++;
  return tmp;
}

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read the raw bits into an integer so that we don't convert through a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
}

void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
  // Clear out the rest of the scope.
  jni_call_.ResetRemainingScopeSlots();
  // Install the HandleScope on the thread.
  self->PushHandleScope(handle_scope_);
}

extern "C" void* artFindNativeMethod();

uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
  } else {
    JniMethodEnd(cookie, self);
  }
}
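
// An illustrative sketch, not part of the runtime and not referenced by it: the shorty
// strings consulted throughout this file describe a signature with one character per
// type, return type first, with 'L' standing for any reference type. A method
// `String f(int, double)` has the shorty "LID", so the shorty[0] == 'L' checks below
// mean "returns a reference".
static size_t IllustrateShortyArgCount(const char* shorty) {
  size_t count = 0;
  for (const char* p = shorty + 1; *p != '\0'; ++p) {  // Skip the return type at index 0.
    ++count;
  }
  return count;
}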
1590 * 1591 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it. 1592 * We need to fix this, as the handle scope needs to go into the callee-save frame. 1593 * 1594 * The return of this function denotes: 1595 * 1) How many bytes of the alloca can be released, if the value is non-negative. 1596 * 2) An error, if the value is negative. 1597 */ 1598extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, 1599 StackReference<mirror::ArtMethod>* sp) 1600 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1601 mirror::ArtMethod* called = sp->AsMirrorPtr(); 1602 DCHECK(called->IsNative()) << PrettyMethod(called, true); 1603 uint32_t shorty_len = 0; 1604 const char* shorty = called->GetShorty(&shorty_len); 1605 1606 // Run the visitor. 1607 BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self); 1608 visitor.VisitArguments(); 1609 visitor.FinalizeHandleScope(self); 1610 1611 // Fix up managed-stack things in Thread. 1612 self->SetTopOfStack(sp, 0); 1613 1614 self->VerifyStack(); 1615 1616 // Start JNI, save the cookie. 1617 uint32_t cookie; 1618 if (called->IsSynchronized()) { 1619 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 1620 if (self->IsExceptionPending()) { 1621 self->PopHandleScope(); 1622 // A negative value denotes an error. 1623 return GetTwoWordFailureValue(); 1624 } 1625 } else { 1626 cookie = JniMethodStart(self); 1627 } 1628 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 1629 *(sp32 - 1) = cookie; 1630 1631 // Retrieve the stored native code. 1632 const void* nativeCode = called->GetNativeMethod(); 1633 1634 // There are two cases for the content of nativeCode: 1635 // 1) Pointer to the native function. 1636 // 2) Pointer to the trampoline for native code binding. 1637 // In the second case, we need to execute the binding and continue with the actual native function 1638 // pointer. 1639 DCHECK(nativeCode != nullptr); 1640 if (nativeCode == GetJniDlsymLookupStub()) { 1641 nativeCode = artFindNativeMethod(); 1642 1643 if (nativeCode == nullptr) { 1644 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 1645 1646 // End JNI, as the assembly will move to deliver the exception. 1647 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 1648 if (shorty[0] == 'L') { 1649 artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock); 1650 } else { 1651 artQuickGenericJniEndJNINonRef(self, cookie, lock); 1652 } 1653 1654 return GetTwoWordFailureValue(); 1655 } 1656 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 1657 } 1658 1659 // Return native code addr(lo) and bottom of alloca address(hi). 1660 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 1661 reinterpret_cast<uintptr_t>(nativeCode)); 1662} 1663 1664/* 1665 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 1666 * unlocking. 
1667 */ 1668extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f) 1669 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1670 StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame(); 1671 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 1672 mirror::ArtMethod* called = sp->AsMirrorPtr(); 1673 uint32_t cookie = *(sp32 - 1); 1674 1675 jobject lock = nullptr; 1676 if (called->IsSynchronized()) { 1677 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) 1678 + sizeof(StackReference<mirror::ArtMethod>)); 1679 lock = table->GetHandle(0).ToJObject(); 1680 } 1681 1682 char return_shorty_char = called->GetShorty()[0]; 1683 1684 if (return_shorty_char == 'L') { 1685 return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock); 1686 } else { 1687 artQuickGenericJniEndJNINonRef(self, cookie, lock); 1688 1689 switch (return_shorty_char) { 1690 case 'F': // Fall-through. 1691 case 'D': 1692 return result_f; 1693 case 'Z': 1694 return result.z; 1695 case 'B': 1696 return result.b; 1697 case 'C': 1698 return result.c; 1699 case 'S': 1700 return result.s; 1701 case 'I': 1702 return result.i; 1703 case 'J': 1704 return result.j; 1705 case 'V': 1706 return 0; 1707 default: 1708 LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char; 1709 return 0; 1710 } 1711 } 1712} 1713 1714// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 1715// for the method pointer. 1716// 1717// It is valid to use this, as at the usage points here (returns from C functions) we are assuming 1718// to hold the mutator lock (see SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations). 1719 1720template<InvokeType type, bool access_check> 1721static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, 1722 mirror::ArtMethod* caller_method, 1723 Thread* self, StackReference<mirror::ArtMethod>* sp); 1724 1725template<InvokeType type, bool access_check> 1726static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, 1727 mirror::ArtMethod* caller_method, 1728 Thread* self, StackReference<mirror::ArtMethod>* sp) { 1729 mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, 1730 type); 1731 if (UNLIKELY(method == nullptr)) { 1732 FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); 1733 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); 1734 uint32_t shorty_len; 1735 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 1736 { 1737 // Remember the args in case a GC happens in FindMethodFromCode. 1738 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 1739 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 1740 visitor.VisitArguments(); 1741 method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method, 1742 self); 1743 visitor.FixupReferences(); 1744 } 1745 1746 if (UNLIKELY(method == NULL)) { 1747 CHECK(self->IsExceptionPending()); 1748 return GetTwoWordFailureValue(); // Failure. 1749 } 1750 } 1751 DCHECK(!self->IsExceptionPending()); 1752 const void* code = method->GetEntryPointFromQuickCompiledCode(); 1753 1754 // When we return, the caller will branch to this address, so it had better not be 0! 
// Explicit artInvokeCommon template instantiations to please the analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                          \
  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                   \
  TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                 \
                                                    mirror::Object* this_object,         \
                                                    mirror::ArtMethod* caller_method,    \
                                                    Thread* self,                        \
                                                    StackReference<mirror::ArtMethod>* sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

// See comments in runtime_support_asm.S.
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object,
                                           caller_method, self, sp);
}

extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
                                        self, sp);
}

extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
                                        self, sp);
}

extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
                                       self, sp);
}

extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object,
    mirror::ArtMethod* caller_method, Thread* self,
    StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
                                         self, sp);
}
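
// For reference, the first instantiation above, EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false),
// expands to:
//
//   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
//   TwoWordReturn artInvokeCommon<kVirtual, false>(uint32_t method_idx,
//                                                  mirror::Object* this_object,
//                                                  mirror::ArtMethod* caller_method,
//                                                  Thread* self,
//                                                  StackReference<mirror::ArtMethod>* sp);
//
// an explicit instantiation that forces the compiler to emit the template body for that
// invoke flavor in this translation unit.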
// Determine the target of an interface dispatch. this_object is known to be non-null.
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
                                                      mirror::Object* this_object,
                                                      mirror::ArtMethod* caller_method,
                                                      Thread* self,
                                                      StackReference<mirror::ArtMethod>* sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* method;
  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
    if (UNLIKELY(method == nullptr)) {
      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
                                                                 caller_method);
      return GetTwoWordFailureValue();  // Failure.
    }
  } else {
    // Unresolved interface method: recover the dex method index from the caller's
    // invoke-interface instruction and resolve it from there.
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());

    // Find the caller PC.
    constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset);

    // Map the caller PC to a dex PC.
    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
    const DexFile::CodeItem* code = caller_method->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
    uint32_t dex_method_idx;
    if (instr_code == Instruction::INVOKE_INTERFACE) {
      dex_method_idx = instr->VRegB_35c();  // The 35c format carries the method index in B.
    } else {
      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
      dex_method_idx = instr->VRegB_3rc();  // As does the 3rc (range) format.
    }

    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
        ->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
                                                   &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);  // Never static.
      visitor.VisitArguments();
      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
                                                     self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
                          << " location: " << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

}  // namespace art