quick_trampoline_entrypoints.cc revision f486778d50fc8afa61330df495e94f4f3ec0e238
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 48;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // | :          |
  // | X19        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D15        |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 304;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 64;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 32;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 176;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
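  // [Illustration, not part of the original file] Worked example of the
  // mapping above, reading it against the x86-64 frame diagram: starting at
  // Gpr1Offset (80) the spill slots hold RCX, RDX, RBX, RBP, RSI, R8, R9 in
  // that order, so the argument GPRs RSI (arg1), RDX (arg2), RCX (arg3),
  // R8 (arg4), R9 (arg5) live in slots 4, 1, 0, 5, 6. With 8-byte spill
  // locations, GprIndexToGprOffset(0) == 32, i.e. arg1 is read from
  // Gpr1Offset + 32 = offset 112 within the frame.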
#else
#error "Unsupported architecture"
#endif

 public:
  static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
                       const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {
    DCHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPRs and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // This implementation doesn't support a reg-spill area for hard float
    // ABI targets such as x86_64 and aarch64. So, for those targets whose
    // 'kQuickSoftFloatAbi' is 'false':
    // (a) 'stack_args_' should point to the method's first argument, and
    // (b) whatever the argument type is, 'stack_index_' should be moved
    //     forward on every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle the "this" parameter.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
        stack_index_++;
      }
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
            stack_index_++;
          }
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            }
            stack_index_++;
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else if (kQuickSoftFloatAbi) {
                  stack_index_++;
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                }
              }
            }
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
          + GetBytesPerGprSpillLocation(kRuntimeISA) /* ArtMethod* */;
    } else {
      // For now, there is no reg-spill area for the targets with a hard
      // float ABI. So, the offset pointing to the method's first parameter
      // ('this' for non-static methods) should be returned.
      return GetBytesPerGprSpillLocation(kRuntimeISA);  // Skip Method*.
    }
  }

  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;
  byte* const gpr_args_;    // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;    // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;      // Index into spilled GPRs.
  uint32_t fpr_index_;      // Index into spilled FPRs.
  uint32_t stack_index_;    // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};
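// [Illustration, not part of the original file] A minimal sketch of how this
// visitor is meant to be subclassed, analogous to the real visitors below:
// override Visit() and let VisitArguments() drive it once per parameter. For
// example, for a non-static method "long f(int, Object)" the shorty is "JIL",
// so Visit() runs three times: once for "this", once for the int, and once
// for the Object. The class name is hypothetical.
class CountReferenceArgsVisitor FINAL : public QuickArgumentVisitor {
 public:
  CountReferenceArgsVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len)
      : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), num_refs_(0) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
    if (IsParamAReference()) {
      num_refs_++;  // GetParamAddress() would locate the StackReference<Object>.
    }
  }

  uint32_t NumRefs() const { return num_refs_; }

 private:
  uint32_t num_refs_;

  DISALLOW_COPY_AND_ASSIGN(CountReferenceArgsVisitor);
};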

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
    MethodHelper mh(method);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
                                                  method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
                                                      mh.GetShortyLength(),
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
      // Ensure static method's class is initialized.
      SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}

// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}
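// [Illustration, not part of the original file] The intended protocol for the
// visitor pair above, as used by artQuickProxyInvokeHandler below:
//   std::vector<jvalue> args;
//   BuildQuickArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa, &args);
//   visitor.VisitArguments();   // every Object* argument now also has a jobject local ref
//   ... call code that may suspend, so the GC may move objects ...
//   visitor.FixupReferences();  // write the possibly-moved objects back to the frame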

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of the proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
  // Place the arguments into the args vector and remove the receiver.
  MethodHelper proxy_mh(proxy_method);
  DCHECK(!proxy_mh.IsStatic()) << PrettyMethod(proxy_method);
  std::vector<jvalue> args;
  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
                                              proxy_mh.GetShortyLength(), &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
                                               rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}
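// [Illustration, not part of the original file] For a proxy created on the
// Java side with, e.g.,
//   Foo p = (Foo) Proxy.newProxyInstance(loader, new Class[] { Foo.class }, handler);
// a call such as p.bar(1, obj) lands in the trampoline above;
// InvokeProxyInvocationHandler then effectively performs
//   handler.invoke(p, Foo.bar, new Object[] { Integer.valueOf(1), obj }).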

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}


// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                    mirror::Object* receiver,
                                                    Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  const DexFile* dex_file;
  uint32_t dex_method_idx;
  if (called->IsRuntimeMethod()) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    {
      MethodHelper mh(caller);
      dex_file = &mh.GetDexFile();
      code = mh.GetCodeItem();
    }
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
        // Avoid used uninitialized warnings.
        invoke_type = kDirect;
        is_range = false;
    }
    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();

  } else {
    invoke_type = kStatic;
    dex_file = &MethodHelper(called).GetDexFile();
    dex_method_idx = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (called->IsRuntimeMethod()) {
    SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
    receiver = sirt_receiver.get();
  }
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      mirror::ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
                               << PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(&MethodHelper(caller).GetDexFile() == dex_file);
        uint32_t method_index =
            MethodHelper(called).FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
    linker->EnsureInitialized(called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fixup any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;
  return code;
}


/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, Arm, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    is necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t):   Add a value for the next GPR.
 *
 * void PushFpr4(float):      Add a value for the next FPR of size 32b. Only called if we may
 *                            need padding, that is, when the architecture is 32b and aligns
 *                            64b values.
 *
 * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job
 *                            to split this if necessary. The current state will have aligned,
 *                            if necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ be called
 *                                          with nullptr, as this might be important for null
 *                                          initialization. Must return the jobject, that is,
 *                                          the reference to the entry in the Sirt (nullptr if
 *                                          necessary).
 *
 */
template <class T> class BuildGenericJniFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildGenericJniFrameStateMachine(T* delegate) : gpr_index_(kNumNativeGprArgs),
                                                           fpr_index_(kNumNativeFprArgs),
                                                           stack_entries_(0),
                                                           delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildGenericJniFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }


  bool HaveSirtGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t sirtRef = PushSirt(ptr);
    if (HaveSirtGpr()) {
      gpr_index_--;
      PushGpr(sirtRef);
    } else {
      stack_entries_++;
      PushStack(sirtRef);
      gpr_index_ = 0;
    }
  }


  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }


  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs alignment
           (gpr_index_ & 1) == 1;          // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
           kAlignLongOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;      // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }


  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

  template <typename U, typename V> V convert(U in) {
    CHECK_LE(sizeof(U), sizeof(V));
    union { U u; V v; } tmp;
    tmp.u = in;
    return tmp.v;
  }
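  // [Illustration, not part of the original file] The union above reinterprets
  // the bit pattern without a numeric conversion, e.g.
  //   convert<float, uint32_t>(1.0f) == 0x3F800000u
  // whereas static_cast<uint32_t>(1.0f) would yield 1. AdvanceFloat below
  // relies on this to pass raw float bits through integer registers or stack
  // slots on soft-float targets.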

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(convert<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(convert<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(convert<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          PushStack(convert<double, uintptr_t>(val));
        } else {
          PushStack(convert<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }


  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs alignment
           (fpr_index_ & 1) == 1;            // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
           kAlignDoubleOnStack &&            // and when it needs 8B alignment
           (stack_entries_ & 1) == 1;        // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushSirt(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* delegate_;             // What Push implementation gets called.
};
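// [Illustration, not part of the original file] A minimal sketch of a delegate
// satisfying the interface documented above the state machine; it merely counts
// what would be pushed. ComputeGenericJniFrameSize below is the real in-tree
// delegate; this hypothetical class only makes the template contract concrete.
class CountingJniFrameDelegate {
 public:
  CountingJniFrameDelegate() : gprs_(0), fprs_(0), stack_(0), sirt_(0) {}

  void PushGpr(uintptr_t /* val */) { gprs_++; }
  void PushFpr4(float /* val */) { fprs_++; }
  void PushFpr8(uint64_t /* val */) { fprs_++; }
  void PushStack(uintptr_t /* val */) { stack_++; }
  uintptr_t PushSirt(mirror::Object* /* ref */) {
    sirt_++;
    return 0;  // A real delegate returns the address of the Sirt entry (or nullptr).
  }

  uint32_t gprs_, fprs_, stack_, sirt_;
};
// Usage sketch:
//   CountingJniFrameDelegate counter;
//   BuildGenericJniFrameStateMachine<CountingJniFrameDelegate> sm(&counter);
//   sm.AdvancePointer(nullptr);  // JNIEnv* slot
//   sm.AdvanceInt(42);           // one int argument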

class ComputeGenericJniFrameSize FINAL {
 public:
  ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
                     void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
                     uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
                     void** code_return, size_t* overall_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ComputeAll(is_static, shorty, shorty_len);

    mirror::ArtMethod* method = **m;

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the Sirt, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += kPointerSize;

    // Add the Sirt.
    *sirt_entries = num_sirt_references_;
    size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
    sp8 -= sirt_size;
    *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
    (*table)->SetNumberOfReferences(num_sirt_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    sp8 -= kPointerSize;
    uint8_t* method_pointer = sp8;
    *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
    *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);

    // Reference cookie and padding.
    sp8 -= 8;
    // Store the Sirt size.
    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);

    // Next comes the native call stack.
    sp8 -= GetStackSize();
    // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
    uintptr_t mask = ~0x0F;
    sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put FPRs and GPRs below.
    // The assumption is OK right now, as we have soft-float arm.
    size_t fregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);

    // Reserve space for the code pointer.
    sp8 -= kPointerSize;
    *code_return = reinterpret_cast<void*>(sp8);

    *overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;

    // The new SP is stored at the end of the alloca, so it can be immediately popped.
    sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
    *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
  }
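  // [Illustration, not part of the original file] The approximate layout the
  // code above produces, with addresses decreasing downward from the incoming
  // sp (which pointed at the old Method* slot):
  //   sp + kPointerSize  -- top of the Sirt (reusing the freed method slot)
  //   ...                   Sirt entries
  //   new Method* slot   <- *m on return
  //   cookie + padding      8 bytes; holds the Sirt size
  //   native call stack     GetStackSize() bytes, 16-byte aligned at the bottom
  //   FPR scratch area      kNumNativeFprArgs * 8 bytes
  //   GPR scratch area      kNumNativeGprArgs * 8 bytes
  //   code pointer slot  <- *code_return
  //   ...
  //   sp - 5KB              stores the new method pointer for the popping stub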

  void ComputeSirtOffset() { }  // Nothing to do, static right now.

  void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize> sm(this);

    // JNIEnv.
    sm.AdvancePointer(nullptr);

    // Class object or "this" as first argument.
    sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr4(float /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr8(uint64_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushStack(uintptr_t /* val */) {
    // Counting is already done in the state machine.
  }

  uintptr_t PushSirt(mirror::Object* /* ptr */) {
    num_sirt_references_++;
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 private:
  uint32_t num_sirt_references_;
  uint32_t num_stack_entries_;
};

// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
                              uint32_t shorty_len, Thread* self) :
    QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
    ComputeGenericJniFrameSize fsc;
    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
                      &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
                      &alloca_used_size_);
    sirt_number_of_references_ = 0;
    cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());

    // The JNI environment is always the first argument.
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      sm_.AdvanceSirt((**sp)->GetDeclaringClass());
    }
  }

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    *cur_fpr_reg_ = val;
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t tmp;
    if (ref == nullptr) {
      *cur_sirt_entry_ = StackReference<mirror::Object>();
      tmp = reinterpret_cast<uintptr_t>(nullptr);
    } else {
      *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
      tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
    }
    cur_sirt_entry_++;
    sirt_number_of_references_++;
    return tmp;
  }

  // Size of the part of the alloca that we actually need.
  size_t GetAllocaUsedSize() {
    return alloca_used_size_;
  }

  void* GetCodeReturn() {
    return code_return_;
  }

 private:
  uint32_t sirt_number_of_references_;
  StackReference<mirror::Object>* cur_sirt_entry_;
  StackIndirectReferenceTable* sirt_;
  uint32_t sirt_expected_refs_;
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
  // StackReference<mirror::Object>* top_of_sirt_;
  void* code_return_;
  size_t alloca_used_size_;

  BuildGenericJniFrameStateMachine<BuildGenericJniFrameVisitor> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read into union so that we don't cast to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
}

void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
  // Initialize padding entries.
  while (sirt_number_of_references_ < sirt_expected_refs_) {
    *cur_sirt_entry_ = StackReference<mirror::Object>();
    cur_sirt_entry_++;
    sirt_number_of_references_++;
  }
  sirt_->SetNumberOfReferences(sirt_expected_refs_);
  DCHECK_NE(sirt_expected_refs_, 0U);
  // Install Sirt.
  self->PushSirt(sirt_);
}

extern "C" void* artFindNativeMethod();

uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
  } else {
    JniMethodEnd(cookie, self);
  }
}

/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Creates a Sirt and a call stack, and fills a mini-stack with values to be pushed to registers.
 * The final element on the stack is a pointer to the native code.
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this, as the Sirt needs to go into the callee-save frame.
 *
 * The return of this function denotes:
 * 1) How many bytes of the alloca can be released, if the value is non-negative.
 * 2) An error, if the value is negative.
 */
extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* called = *sp;
  DCHECK(called->IsNative()) << PrettyMethod(called, true);

  // Run the visitor.
  MethodHelper mh(called);

  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
                                      self);
  visitor.VisitArguments();
  visitor.FinalizeSirt(self);

  // Fix up managed-stack things in Thread.
  self->SetTopOfStack(sp, 0);

  self->VerifyStack();

  // Start JNI, save the cookie.
  uint32_t cookie;
  if (called->IsSynchronized()) {
    cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
    if (self->IsExceptionPending()) {
      self->PopSirt();
      // A negative value denotes an error.
      return -1;
    }
  } else {
    cookie = JniMethodStart(self);
  }
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  *(sp32 - 1) = cookie;

  // Retrieve the stored native code.
  const void* nativeCode = called->GetNativeMethod();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native function
  // pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
    nativeCode = artFindNativeMethod();

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.

      // End JNI, as the assembly will move to deliver the exception.
      jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
      if (mh.GetShorty()[0] == 'L') {
        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
      } else {
        artQuickGenericJniEndJNINonRef(self, cookie, lock);
      }

      return -1;
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

  // Store the native code pointer in the stack at the right location.
  uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
  *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);

  // 5K reserved, window_size + frame pointer used.
  size_t window_size = visitor.GetAllocaUsedSize();
  return (5 * KB) - window_size - kPointerSize;
}
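// [Illustration, not part of the original file] Worked example of the return
// contract above: with the 5KB reserved alloca, a used window of, say,
// 400 bytes and an 8-byte kPointerSize, the trampoline returns
// 5120 - 400 - 8 = 4712, i.e. the assembly stub may release 4712 bytes of the
// alloca, leaving the used window and the frame-pointer slot in place; any
// negative return instead makes the stub deliver the pending exception.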
/*
 * Called after the native JNI code. Responsible for cleanup (SIRT, saved state) and
 * unlocking.
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
                                                    jvalue result, uint64_t result_f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  mirror::ArtMethod* called = *sp;
  uint32_t cookie = *(sp32 - 1);

  jobject lock = nullptr;
  if (called->IsSynchronized()) {
    StackIndirectReferenceTable* table =
        reinterpret_cast<StackIndirectReferenceTable*>(
            reinterpret_cast<uint8_t*>(sp) + kPointerSize);
    lock = reinterpret_cast<jobject>(table->GetStackReference(0));
  }

  MethodHelper mh(called);
  char return_shorty_char = mh.GetShorty()[0];

  if (return_shorty_char == 'L') {
    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
  } else {
    artQuickGenericJniEndJNINonRef(self, cookie, lock);

    switch (return_shorty_char) {
      case 'F':  // Fall-through.
      case 'D':
        return result_f;
      case 'Z':
        return result.z;
      case 'B':
        return result.b;
      case 'C':
        return result.c;
      case 'S':
        return result.s;
      case 'I':
        return result.i;
      case 'J':
        return result.j;
      case 'V':
        return 0;
      default:
        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
        return 0;
    }
  }
}

template<InvokeType type, bool access_check>
static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                mirror::ArtMethod* caller_method,
                                Thread* self, mirror::ArtMethod** sp) {
  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
                                             type);
  if (UNLIKELY(method == nullptr)) {
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty =
        dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return 0;  // Failure.
    }
  }
  DCHECK(!self->IsExceptionPending());
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
                          << MethodHelper(method).GetDexFile().GetLocation();
#ifdef __LP64__
  UNIMPLEMENTED(FATAL);
  return 0;
#else
  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
  uint64_t result = ((code_uint << 32) | method_uint);
  return result;
#endif
}
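// Illustrative only: how a 32-bit assembly caller could unpack the packed
// result of artInvokeCommon above (method pointer in the low word, code
// pointer in the high word). A sketch of the contract, not the real stub:
//
//   uint64_t packed = artInvokeCommon<kVirtual, true>(method_idx, this_object,
//                                                     caller_method, self, sp);
//   if (packed == 0) {
//     // Lookup failed; an exception is pending.
//   }
//   mirror::ArtMethod* method =
//       reinterpret_cast<mirror::ArtMethod*>(static_cast<uint32_t>(packed));
//   void* code = reinterpret_cast<void*>(static_cast<uint32_t>(packed >> 32));
//   // Branch to 'code' with 'method' in the expected method register.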
// Explicit artInvokeCommon template function declarations to please analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                          \
  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                    \
  uint64_t artInvokeCommon<type, access_check>(uint32_t method_idx,                       \
                                               mirror::Object* this_object,               \
                                               mirror::ArtMethod* caller_method,          \
                                               Thread* self, mirror::ArtMethod** sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

// See comments in runtime_support_asm.S.
extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
                                                                mirror::Object* this_object,
                                                                mirror::ArtMethod* caller_method,
                                                                Thread* self,
                                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
                                                             mirror::Object* this_object,
                                                             mirror::ArtMethod* caller_method,
                                                             Thread* self,
                                                             mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
                                                             mirror::Object* this_object,
                                                             mirror::ArtMethod* caller_method,
                                                             Thread* self,
                                                             mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
                                                            mirror::Object* this_object,
                                                            mirror::ArtMethod* caller_method,
                                                            Thread* self,
                                                            mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
                                                              mirror::Object* this_object,
                                                              mirror::ArtMethod* caller_method,
                                                              Thread* self,
                                                              mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
}
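// Illustrative only: the EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL macro above
// expands, for the (kVirtual, true) case used by
// artInvokeVirtualTrampolineWithAccessCheck, to:
//
//   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
//   uint64_t artInvokeCommon<kVirtual, true>(uint32_t method_idx,
//                                            mirror::Object* this_object,
//                                            mirror::ArtMethod* caller_method,
//                                            Thread* self, mirror::ArtMethod** sp);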
// Determine the target of an interface dispatch. The this_object is known to be non-null.
extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
                                                 mirror::Object* this_object,
                                                 mirror::ArtMethod* caller_method,
                                                 Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* method;
  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
    if (UNLIKELY(method == nullptr)) {
      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
                                                                 caller_method);
      return 0;  // Failure.
    }
  } else {
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
    // Determine the method index from the calling dex instruction.
#if defined(__arm__)
    // On entry, the stack pointed to by sp is:
    // | argN       |  |
    // | ...        |  |
    // | arg4       |  |
    // | arg3 spill |  |  Caller's frame
    // | arg2 spill |  |
    // | arg1 spill |  |
    // | Method*    | ---
    // | LR         |
    // | ...        |    callee saves
    // | R3         |    arg3
    // | R2         |    arg2
    // | R1         |    arg1
    // | R0         |
    // | Method*    |  <- sp
    DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
    uintptr_t caller_pc = regs[10];
#elif defined(__i386__)
    // On entry, the stack pointed to by sp is:
    // | argN        |  |
    // | ...         |  |
    // | arg4        |  |
    // | arg3 spill  |  |  Caller's frame
    // | arg2 spill  |  |
    // | arg1 spill  |  |
    // | Method*     | ---
    // | Return      |
    // | EBP,ESI,EDI |    callee saves
    // | EBX         |    arg3
    // | EDX         |    arg2
    // | ECX         |    arg1
    // | EAX/Method* |  <- sp
    DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
    uintptr_t caller_pc = regs[7];
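    // Offset arithmetic for the reads above: with the 32-byte x86 frame and
    // regs == sp, regs[7] is byte offset 7 * 4 == 28 == frame size - pointer
    // size, i.e. the return-address slot directly below the caller's Method*.
    // The ARM case is analogous: regs == sp + 4, so regs[10] reads offset
    // 4 + 40 == 44, the LR slot of the 48-byte RefsAndArgs frame.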
#elif defined(__mips__)
    // On entry, the stack pointed to by sp is:
    // | argN       |  |
    // | ...        |  |
    // | arg4       |  |
    // | arg3 spill |  |  Caller's frame
    // | arg2 spill |  |
    // | arg1 spill |  |
    // | Method*    | ---
    // | RA         |
    // | ...        |    callee saves
    // | A3         |    arg3
    // | A2         |    arg2
    // | A1         |    arg1
    // | A0/Method* |  <- sp
    DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
    uintptr_t caller_pc = regs[15];
#else
    UNIMPLEMENTED(FATAL);
    uintptr_t caller_pc = 0;
#endif
    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
    const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
    uint32_t dex_method_idx;
    if (instr_code == Instruction::INVOKE_INTERFACE) {
      dex_method_idx = instr->VRegB_35c();
    } else {
      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
      dex_method_idx = instr->VRegB_3rc();
    }

    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty =
        dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method,
                                                     self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return 0;  // Failure.
    }
  }
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
                          << MethodHelper(method).GetDexFile().GetLocation();
#ifdef __LP64__
  UNIMPLEMENTED(FATAL);
  return 0;
#else
  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
  uint64_t result = ((code_uint << 32) | method_uint);
  return result;
#endif
}

}  // namespace art
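// Illustrative only: the slow-path method-index recovery used by
// artInvokeInterfaceTrampoline above, restated as a compact sketch. The
// caller's return PC is mapped back to a dex pc, the invoke-interface
// instruction at that pc is decoded, and the method index is taken from its
// B argument:
//
//   uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
//   const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
//   uint32_t dex_method_idx = (instr->Opcode() == Instruction::INVOKE_INTERFACE)
//                                 ? instr->VRegB_35c()   // invoke-interface
//                                 : instr->VRegB_3rc();  // invoke-interface/range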