quick_trampoline_entrypoints.cc revision ad61517890168ff6ed19063cc8032a9c033d135b
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Size of each spilled GPR.
#ifdef __LP64__
  static constexpr size_t kBytesPerGprSpillLocation = 8;
#else
  static constexpr size_t kBytesPerGprSpillLocation = 4;
#endif
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    callee saves
  // | R3         |    arg3
  // | R2         |    arg2
  // | R1         |    arg1
  // | R0         |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 4;  // FPR spill size is 4 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 48;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
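  // Worked example for the ARM constants above (a sketch derived from the values in this
  // block): integral arg1 lives in the spilled R1 at sp + Gpr1Offset + GprIndexToGprOffset(0)
  // = sp + 8, arg2 (R2) at sp + 12, and arg3 (R3) at sp + 16. Once the three GPR slots are
  // exhausted, further arguments are read from the caller's out area, which begins at
  // sp + FrameSize + StackArgumentStartFromShorty(...); see GetParamAddress() below.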
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X28        |
  // |  :         |
  // | X19        |
  // | X7         |
  // |  :         |
  // | X1         |
  // | D15        |
  // |  :         |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 8;  // FPR spill size is 8 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 304;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__mips__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 4;  // FPR spill size is 4 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 64;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | EAX/Method* |  <- sp
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 8;  // FPR spill size is 8 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 32;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * kBytesPerGprSpillLocation;
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr size_t kBytesPerFprSpillLocation = 8;  // FPR spill size is 8 bytes.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168;  // Offset of return address.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 176;  // Frame size.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    // Arguments are not spilled in call order (see the frame diagram above).
    switch (gpr_index) {
      case 0: return (4 * kBytesPerGprSpillLocation);  // RSI, arg1.
      case 1: return (1 * kBytesPerGprSpillLocation);  // RDX, arg2.
      case 2: return (0 * kBytesPerGprSpillLocation);  // RCX, arg3.
      case 3: return (5 * kBytesPerGprSpillLocation);  // R8, arg4.
      case 4: return (6 * kBytesPerGprSpillLocation);  // R9, arg5.
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif
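  // The "shorty" strings used throughout this visitor are dex method descriptors with one
  // character per type: the first character is the return type, the rest are the parameter
  // types ('Z' boolean, 'B' byte, 'C' char, 'S' short, 'I' int, 'J' long, 'F' float,
  // 'D' double, 'L' any reference, 'V' void). For example, a hypothetical method
  //   String concat(int count, Object o, long id)
  // has the shorty "LILJ"; VisitArguments() below walks it starting at index 1, since index 0
  // is the return type.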

 public:
  static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
                       const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
      gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
      is_split_long_or_double_(false) {
    DCHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  byte* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
          return fpr_args_ + (fpr_index_ * kBytesPerFprSpillLocation);
        }
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((kBytesPerGprSpillLocation == 4) || (kBytesPerFprSpillLocation == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    DCHECK(IsSplitLongOrDouble());
    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    return (low_half & 0xffffffffULL) | (high_half << 32);
  }
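
  // Example of the split case (a sketch for the 32-bit ARM layout above): with
  // kNumQuickGprArgs == 3, a long whose low word landed in the last GPR slot (R3) has its high
  // word in the first out slot of the caller's frame. IsSplitLongOrDouble() flags exactly that
  // situation, and ReadSplitLongParam() stitches the two halves back together.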

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    gpr_index_ = 0;
    fpr_index_ = 0;
    stack_index_ = 0;
    if (!is_static_) {  // Handle the "this" argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      if (kNumQuickGprArgs > 0) {
        gpr_index_++;
      } else {
        stack_index_++;
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          if (gpr_index_ < kNumQuickGprArgs) {
            gpr_index_++;
          } else {
            stack_index_++;
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
            } else {
              stack_index_++;
            }
          } else {
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
            } else {
              stack_index_++;
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            is_split_long_or_double_ = (kBytesPerGprSpillLocation == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            Visit();
            if (gpr_index_ < kNumQuickGprArgs) {
              gpr_index_++;
              if (kBytesPerGprSpillLocation == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  gpr_index_++;
                } else {
                  stack_index_++;
                }
              }
            } else {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
          } else {
            is_split_long_or_double_ = (kBytesPerFprSpillLocation == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs);
            Visit();
            if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
              fpr_index_++;
              if (kBytesPerFprSpillLocation == 4) {
                if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
                  fpr_index_++;
                } else {
                  stack_index_++;
                }
              }
            } else {
              if (kBytesStackArgLocation == 4) {
                stack_index_ += 2;
              } else {
                CHECK_EQ(kBytesStackArgLocation, 8U);
                stack_index_++;
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 private:
  static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
                                             uint32_t shorty_len) {
    if (kQuickSoftFloatAbi) {
      CHECK_EQ(kNumQuickFprArgs, 0U);
      return (kNumQuickGprArgs * kBytesPerGprSpillLocation) + kBytesPerGprSpillLocation /* ArtMethod* */;
    } else {
      size_t offset = kBytesPerGprSpillLocation;  // Skip Method*.
      size_t gprs_seen = 0;
      size_t fprs_seen = 0;
      if (!is_static && (gprs_seen < kNumQuickGprArgs)) {
        gprs_seen++;
        offset += kBytesStackArgLocation;
      }
      for (uint32_t i = 1; i < shorty_len; ++i) {
        switch (shorty[i]) {
          case 'Z':
          case 'B':
          case 'C':
          case 'S':
          case 'I':
          case 'L':
            if (gprs_seen < kNumQuickGprArgs) {
              gprs_seen++;
              offset += kBytesStackArgLocation;
            }
            break;
          case 'J':
            if (gprs_seen < kNumQuickGprArgs) {
              gprs_seen++;
              offset += 2 * kBytesStackArgLocation;
              if (kBytesPerGprSpillLocation == 4) {
                if (gprs_seen < kNumQuickGprArgs) {
                  gprs_seen++;
                }
              }
            }
            break;
          case 'F':
            if ((kNumQuickFprArgs != 0) && (fprs_seen + 1 < kNumQuickFprArgs + 1)) {
              fprs_seen++;
              offset += kBytesStackArgLocation;
            }
            break;
          case 'D':
            if ((kNumQuickFprArgs != 0) && (fprs_seen + 1 < kNumQuickFprArgs + 1)) {
              fprs_seen++;
              offset += 2 * kBytesStackArgLocation;
              if (kBytesPerFprSpillLocation == 4) {
                if ((kNumQuickFprArgs != 0) && (fprs_seen + 1 < kNumQuickFprArgs + 1)) {
                  fprs_seen++;
                }
              }
            }
            break;
          default:
            LOG(FATAL) << "Unexpected shorty character: " << shorty[i] << " in " << shorty;
        }
      }
      return offset;
    }
  }

  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;
  byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
  byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
  byte* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  uint32_t fpr_index_;  // Index into spilled FPRs.
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};
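
// Usage sketch for the visitor above (a hypothetical subclass, using only the public interface
// shown; the real delegates follow below): override Visit() and query the current argument via
// the accessors, e.g.
//
//   class CountRefArgsVisitor FINAL : public QuickArgumentVisitor {
//    public:
//     CountRefArgsVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
//                         uint32_t shorty_len)
//         : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), num_refs_(0) {}
//     void Visit() OVERRIDE { if (IsParamAReference()) { num_refs_++; } }
//     size_t num_refs_;
//   };
//
// VisitArguments() calls Visit() once per argument (including the implicit "this" of instance
// methods) with cur_type_ and the register/stack cursors positioned for that argument.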

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;  // Wide values occupy two vregs; the shared increment below adds the second.
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
    MethodHelper mh(method);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
                                                  method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
                                                      mh.GetShortyLength(),
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
      // Ensure static method's class is initialized.
      SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);
    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}
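
// In a dex method frame the incoming arguments occupy the highest virtual registers. For
// example (hypothetical values), a code item with registers_size_ == 5 and ins_size_ == 2 gets
// first_arg_reg == 3, so the two arguments are copied into v3 and v4 of the shadow frame.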

// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      val.j = 0;
      break;
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}
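
// Why the fixup is needed: a moving collector may relocate objects while the invocation handler
// runs, leaving the raw StackReference slots in the quick frame stale. The jobject local
// references recorded above are roots the GC keeps up to date, so decoding them again and
// writing the result back refreshes the frame before returning to managed code.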

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
                                               mirror::Object* receiver,
                                               Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
  self->SetTopOfStack(sp, 0);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  MethodHelper proxy_mh(proxy_method);
  DCHECK(!proxy_mh.IsStatic()) << PrettyMethod(proxy_method);
  std::vector<jvalue> args;
  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
                                              proxy_mh.GetShortyLength(), &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
  DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  self->EndAssertNoThreadSuspension(old_cause);
  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
                                               rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

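// When the resolution trampoline below is entered through an unresolved call site, the method
// index is recovered from the invoke instruction at the caller's dex PC. For example, a
// hypothetical "invoke-virtual {v1, v2}, SomeClass.foo@0x12" (a 35c-format instruction) yields
// dex_method_idx == 0x12 via VRegB_35c(); the *_RANGE forms (3rc format) use VRegB_3rc() instead.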
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
                                                    mirror::Object* receiver,
                                                    Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  const DexFile* dex_file;
  uint32_t dex_method_idx;
  if (called->IsRuntimeMethod()) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    {
      MethodHelper mh(caller);
      dex_file = &mh.GetDexFile();
      code = mh.GetCodeItem();
    }
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
        // Avoid used uninitialized warnings.
        invoke_type = kDirect;
        is_range = false;
    }
    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();

  } else {
    invoke_type = kStatic;
    dex_file = &MethodHelper(called).GetDexFile();
    dex_method_idx = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (called->IsRuntimeMethod()) {
    SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
    receiver = sirt_receiver.get();
  }
  const void* code = NULL;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
      }
      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method.
      if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
        caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
      } else {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(&MethodHelper(caller).GetDexFile() == dex_file);
        uint32_t method_index =
            MethodHelper(called).FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
        if (method_index != DexFile::kDexNoIndex) {
          caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
        }
      }
    }
    // Ensure that the called method's class is initialized.
    SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
    linker->EnsureInitialized(called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      code = called->GetEntryPointFromQuickCompiledCode();
    } else if (called_class->IsInitializing()) {
      if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == NULL, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;
  return code;
}


/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, ARM, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    is necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR.
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
 *                       padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ be given
 *                                          nullptr, as this might be important for null
 *                                          initialization. Must return the jobject, that is, the
 *                                          reference to the entry in the Sirt (nullptr if
 *                                          necessary).
 */
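// A minimal delegate sketch (hypothetical; ComputeGenericJniFrameSize below is a real
// implementation of this interface that merely counts):
//
//   class CountingDelegate {
//    public:
//     void PushGpr(uintptr_t) { gprs_++; }
//     void PushFpr4(float) { fprs_++; }
//     void PushFpr8(uint64_t) { fprs_++; }
//     void PushStack(uintptr_t) { stack_++; }
//     uintptr_t PushSirt(mirror::Object*) { refs_++; return 0u; }
//     size_t gprs_ = 0, fprs_ = 0, stack_ = 0, refs_ = 0;
//   };
//   // BuildGenericJniFrameStateMachine<CountingDelegate> sm(&delegate);
//   // sm.AdvanceInt(42);  // Routed to PushGpr() or PushStack() depending on free registers.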
template <class T> class BuildGenericJniFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildGenericJniFrameStateMachine(T* delegate) : gpr_index_(kNumNativeGprArgs),
                                                           fpr_index_(kNumNativeFprArgs),
                                                           stack_entries_(0),
                                                           delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
    // the next register is even; counting down is just to make the compiler happy...
    CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    CHECK_EQ(kNumNativeFprArgs % 2, 0U);
  }

  virtual ~BuildGenericJniFrameStateMachine() {}

  bool HavePointerGpr() {
    return gpr_index_ > 0;
  }

  void AdvancePointer(void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }


  bool HaveSirtGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t sirtRef = PushSirt(ptr);
    if (HaveSirtGpr()) {
      gpr_index_--;
      PushGpr(sirtRef);
    } else {
      stack_entries_++;
      PushStack(sirtRef);
      gpr_index_ = 0;
    }
  }


  bool HaveIntGpr() {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      PushGpr(val);
    } else {
      stack_entries_++;
      PushStack(val);
      gpr_index_ = 0;
    }
  }


  bool HaveLongGpr() {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }
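
  // Example of the alignment rules encoded here (my reading of the 32-bit ARM AAPCS): for a
  // native call f(int, long), the int goes in r0 and the long must start at an even register,
  // so it lands in r2/r3 and r1 is padded. LongGprNeedsPadding() detects exactly this by
  // checking whether the count of free GPRs is odd (see the constructor's evenness CHECKs).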

  bool LongGprNeedsPadding() {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs alignment
        (gpr_index_ & 1) == 1;                // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() {
    return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
        kAlignLongOnStack &&                  // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;            // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }


  bool HaveFloatFpr() {
    return fpr_index_ > 0;
  }

  template <typename U, typename V> V convert(U in) {
    CHECK_LE(sizeof(U), sizeof(V));
    union { U u; V v; } tmp;
    tmp.u = in;
    return tmp.v;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(convert<float, uint32_t>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiRegistersWidened) {
            PushFpr8(convert<double, uint64_t>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(convert<float, uint64_t>(val));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          PushStack(convert<double, uintptr_t>(val));
        } else {
          PushStack(convert<float, uintptr_t>(val));
        }
        fpr_index_ = 0;
      }
    }
  }
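
  // The union in convert() is a bit-for-bit copy, not a numeric conversion: e.g.
  // convert<float, uint32_t>(1.0f) yields 0x3f800000, the IEEE-754 encoding of 1.0f, which is
  // what a soft-float ABI expects in an integer register. Only the kMultiRegistersWidened paths
  // above convert numerically (float to double, via the implicit conversion at the call) before
  // taking the bits.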

  bool HaveDoubleFpr() {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&   // only pad when using multiple registers
        kAlignDoubleOnStack &&                // and when it needs alignment
        (fpr_index_ & 1) == 1;                // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() {
    return kRegistersNeededForDouble > 1 &&   // only pad when using multiple registers
        kAlignDoubleOnStack &&                // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;            // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t getStackEntries() {
    return stack_entries_;
  }

  uint32_t getNumberOfUsedGprs() {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t getNumberOfUsedFprs() {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushSirt(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* delegate_;             // What Push implementation gets called.
};

class ComputeGenericJniFrameSize FINAL {
 public:
  ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}

  uint32_t GetStackSize() {
    return num_stack_entries_ * sizeof(uintptr_t);
  }
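
  // Sketch of the frame that ComputeLayout() below carves out of the alloca region (derived
  // from the pointer arithmetic in the method; higher addresses first):
  // | Sirt               |   the top of the Sirt reuses the old Method* slot
  // | Method*            | <- new sp, so the stack remains walkable
  // | Sirt size, padding |   8 bytes; the JNI cookie is stored here later
  // | native call stack  |   out arguments, aligned to 16 bytes
  // | FPR mini-stack     |   values to be loaded into FPR arguments
  // | GPR mini-stack     |   values to be loaded into GPR arguments
  // | native code ptr    |   the final element, consumed by the assembly stub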

  // WARNING: After this, *sp won't be pointing to the method anymore!
  void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
                     void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
                     uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
                     void** code_return, size_t* overall_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ComputeAll(is_static, shorty, shorty_len);

    mirror::ArtMethod* method = **m;

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the Sirt, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += kPointerSize;

    // Add the Sirt.
    *sirt_entries = num_sirt_references_;
    size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
    sp8 -= sirt_size;
    *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
    (*table)->SetNumberOfReferences(num_sirt_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    sp8 -= kPointerSize;
    uint8_t* method_pointer = sp8;
    *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
    *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);

    // Reference cookie and padding.
    sp8 -= 8;
    // Store Sirt size.
    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);

    // Next comes the native call stack.
    sp8 -= GetStackSize();
    // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
    uintptr_t mask = ~0x0F;
    sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put FPRs and GPRs below.
    // Assumption is OK right now, as we have soft-float arm.
    size_t fregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);

    // Reserve space for the code pointer.
    sp8 -= kPointerSize;
    *code_return = reinterpret_cast<void*>(sp8);

    *overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;

    // The new SP is stored at the end of the alloca, so it can be immediately popped.
    sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
    *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
  }

  void ComputeSirtOffset() { }  // Nothing to do, static right now.

  void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildGenericJniFrameStateMachine<ComputeGenericJniFrameSize> sm(this);

    // JNIEnv.
    sm.AdvancePointer(nullptr);

    // Class object or this as first argument.
    sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
      }
    }

    num_stack_entries_ = sm.getStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr4(float /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr8(uint64_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushStack(uintptr_t /* val */) {
    // Counting is already done in the state machine.
  }

  uintptr_t PushSirt(mirror::Object* /* ptr */) {
    num_sirt_references_++;
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 private:
  uint32_t num_sirt_references_;
  uint32_t num_stack_entries_;
};

// Visits arguments on the stack placing them into a region lower down the stack for the benefit
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
                              uint32_t shorty_len, Thread* self) :
    QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
    ComputeGenericJniFrameSize fsc;
    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
                      &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
                      &alloca_used_size_);
    sirt_number_of_references_ = 0;
    cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());

    // The JNI environment is always the first argument.
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      sm_.AdvanceSirt((**sp)->GetDeclaringClass());
    }
  }

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    *cur_fpr_reg_ = val;
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t tmp;
    if (ref == nullptr) {
      *cur_sirt_entry_ = StackReference<mirror::Object>();
      tmp = reinterpret_cast<uintptr_t>(nullptr);
    } else {
      *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
      tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
    }
    cur_sirt_entry_++;
    sirt_number_of_references_++;
    return tmp;
  }

  // Size of the part of the alloca that we actually need.
  size_t GetAllocaUsedSize() {
    return alloca_used_size_;
  }

  void* GetCodeReturn() {
    return code_return_;
  }

 private:
  uint32_t sirt_number_of_references_;
  StackReference<mirror::Object>* cur_sirt_entry_;
  StackIndirectReferenceTable* sirt_;
  uint32_t sirt_expected_refs_;
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
  // StackReference<mirror::Object>* top_of_sirt_;
  void* code_return_;
  size_t alloca_used_size_;

  BuildGenericJniFrameStateMachine<BuildGenericJniFrameVisitor> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read into union so that we don't cast to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      break;
  }
}

void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
  // Initialize padding entries.
  while (sirt_number_of_references_ < sirt_expected_refs_) {
    *cur_sirt_entry_ = StackReference<mirror::Object>();
    cur_sirt_entry_++;
    sirt_number_of_references_++;
  }
  sirt_->SetNumberOfReferences(sirt_expected_refs_);
  DCHECK_NE(sirt_expected_refs_, 0U);
  // Install Sirt.
  self->PushSirt(sirt_);
}

extern "C" void* artFindNativeMethod();

uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
  } else {
    JniMethodEnd(cookie, self);
  }
}
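
// The cookie passed around above is the local reference state saved by JniMethodStart*(); the
// JniMethodEnd*() helpers use it to pop any local references the native call created (and, for
// the *Synchronized variants, to unlock the held monitor).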

/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
 * The final element on the stack is a pointer to the native code.
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this, as the Sirt needs to go into the callee-save frame.
 *
 * The return of this function denotes:
 * 1) How many bytes of the alloca can be released, if the value is non-negative.
 * 2) An error, if the value is negative.
 */
extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* called = *sp;
  DCHECK(called->IsNative()) << PrettyMethod(called, true);

  // Run the visitor.
  MethodHelper mh(called);

  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
                                      self);
  visitor.VisitArguments();
  visitor.FinalizeSirt(self);

  // Fix up managed-stack things in Thread.
  self->SetTopOfStack(sp, 0);

  self->VerifyStack();

  // Start JNI, save the cookie.
  uint32_t cookie;
  if (called->IsSynchronized()) {
    cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
    if (self->IsExceptionPending()) {
      self->PopSirt();
      // A negative value denotes an error.
      return -1;
    }
  } else {
    cookie = JniMethodStart(self);
  }
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  *(sp32 - 1) = cookie;

  // Retrieve the stored native code.
  const void* nativeCode = called->GetNativeMethod();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native function
  // pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
    nativeCode = artFindNativeMethod();

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.

      // End JNI, as the assembly will move to deliver the exception.
      jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
      if (mh.GetShorty()[0] == 'L') {
        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
      } else {
        artQuickGenericJniEndJNINonRef(self, cookie, lock);
      }

      return -1;
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

  // Store the native code pointer in the stack at the right location.
  uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
  *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);

  // 5K reserved, window_size + frame pointer used.
  size_t window_size = visitor.GetAllocaUsedSize();
  return (5 * KB) - window_size - kPointerSize;
}

/*
 * Is called after the native JNI code. Responsible for cleanup (SIRT, saved state) and
 * unlocking.
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
                                                    jvalue result, uint64_t result_f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  mirror::ArtMethod* called = *sp;
  uint32_t cookie = *(sp32 - 1);

  jobject lock = nullptr;
  if (called->IsSynchronized()) {
    StackIndirectReferenceTable* table =
        reinterpret_cast<StackIndirectReferenceTable*>(
            reinterpret_cast<uint8_t*>(sp) + kPointerSize);
    lock = reinterpret_cast<jobject>(table->GetStackReference(0));
  }

  MethodHelper mh(called);
  char return_shorty_char = mh.GetShorty()[0];

  if (return_shorty_char == 'L') {
    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
  } else {
    artQuickGenericJniEndJNINonRef(self, cookie, lock);

    switch (return_shorty_char) {
      case 'F':  // Fall-through.
      case 'D':
        return result_f;
      case 'Z':
        return result.z;
      case 'B':
        return result.b;
      case 'C':
        return result.c;
      case 'S':
        return result.s;
      case 'I':
        return result.i;
      case 'J':
        return result.j;
      case 'V':
        return 0;
      default:
        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
        return 0;
    }
  }
}

template<InvokeType type, bool access_check>
static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                mirror::ArtMethod* caller_method,
                                Thread* self, mirror::ArtMethod** sp);

template<InvokeType type, bool access_check>
static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                mirror::ArtMethod* caller_method,
                                Thread* self, mirror::ArtMethod** sp) {
  mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
                                             type);
  if (UNLIKELY(method == nullptr)) {
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty =
        dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return 0;  // Failure.
    }
  }
  DCHECK(!self->IsExceptionPending());
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
                          << MethodHelper(method).GetDexFile().GetLocation();
#ifdef __LP64__
  UNIMPLEMENTED(FATAL);
  return 0;
#else
  // Pack the code pointer into the high 32 bits and the method pointer into the low 32 bits;
  // the caller unpacks both (see the DCHECK above).
  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
  uint64_t result = ((code_uint << 32) | method_uint);
  return result;
#endif
}

// Explicit artInvokeCommon template function declarations to please analysis tool.
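// For example, EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false) expands to the
// explicit instantiation:
//
//   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
//   uint64_t artInvokeCommon<kVirtual, false>(uint32_t method_idx,
//                                             mirror::Object* this_object,
//                                             mirror::ArtMethod* caller_method,
//                                             Thread* self, mirror::ArtMethod** sp);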
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
  uint64_t artInvokeCommon<type, access_check>(uint32_t method_idx, \
                                               mirror::Object* this_object, \
                                               mirror::ArtMethod* caller_method, \
                                               Thread* self, mirror::ArtMethod** sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

// See comments in runtime_support_asm.S.
extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
                                                                mirror::Object* this_object,
                                                                mirror::ArtMethod* caller_method,
                                                                Thread* self,
                                                                mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
                                                             mirror::Object* this_object,
                                                             mirror::ArtMethod* caller_method,
                                                             Thread* self,
                                                             mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
                                                             mirror::Object* this_object,
                                                             mirror::ArtMethod* caller_method,
                                                             Thread* self,
                                                             mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
                                                            mirror::Object* this_object,
                                                            mirror::ArtMethod* caller_method,
                                                            Thread* self,
                                                            mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
}

extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
                                                              mirror::Object* this_object,
                                                              mirror::ArtMethod* caller_method,
                                                              Thread* self,
                                                              mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
}

// Determine the target of an interface dispatch. The 'this' object is known to be non-null.
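// If the interface method carries a valid dex method index, the target is looked up directly
// in the receiver's vtable via FindVirtualMethodForInterface(). Otherwise the call was made
// through the runtime's resolution method, and the interface method index must first be
// recovered from the invoke-interface instruction at the caller's dex pc (decoded below).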
extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
                                                 mirror::Object* this_object,
                                                 mirror::ArtMethod* caller_method,
                                                 Thread* self, mirror::ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* method;
  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
    if (UNLIKELY(method == nullptr)) {
      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
                                                                 caller_method);
      return 0;  // Failure.
    }
  } else {
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
    // Determine the method index from the calling dex instruction.
#if defined(__arm__)
    // On entry the stack pointed to by sp is:
    // | argN        |  |
    // | ...         |  |
    // | arg4        |  |
    // | arg3 spill  |  |  Caller's frame
    // | arg2 spill  |  |
    // | arg1 spill  |  |
    // | Method*     | ---
    // | LR          |
    // | ...         |    callee saves
    // | R3          |    arg3
    // | R2          |    arg2
    // | R1          |    arg1
    // | R0          |
    // | Method*     |  <- sp
    DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
    uintptr_t caller_pc = regs[10];
#elif defined(__i386__)
    // On entry the stack pointed to by sp is:
    // | argN        |  |
    // | ...         |  |
    // | arg4        |  |
    // | arg3 spill  |  |  Caller's frame
    // | arg2 spill  |  |
    // | arg1 spill  |  |
    // | Method*     | ---
    // | Return      |
    // | EBP,ESI,EDI |    callee saves
    // | EBX         |    arg3
    // | EDX         |    arg2
    // | ECX         |    arg1
    // | EAX/Method* |  <- sp
    DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
    uintptr_t caller_pc = regs[7];
#elif defined(__mips__)
    // On entry the stack pointed to by sp is:
    // | argN        |  |
    // | ...         |  |
    // | arg4        |  |
    // | arg3 spill  |  |  Caller's frame
    // | arg2 spill  |  |
    // | arg1 spill  |  |
    // | Method*     | ---
    // | RA          |
    // | ...         |    callee saves
    // | A3          |    arg3
    // | A2          |    arg2
    // | A1          |    arg1
    // | A0/Method*  |  <- sp
    DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
    uintptr_t caller_pc = regs[15];
#else
    UNIMPLEMENTED(FATAL);
    uintptr_t caller_pc = 0;
#endif
    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
    const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
    uint32_t dex_method_idx;
    if (instr_code == Instruction::INVOKE_INTERFACE) {
      dex_method_idx = instr->VRegB_35c();
    } else {
      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
      dex_method_idx = instr->VRegB_3rc();
    }

    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty =
        dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method,
                                                     self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return 0;  // Failure.
    }
  }
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
                          << MethodHelper(method).GetDexFile().GetLocation();
#ifdef __LP64__
  UNIMPLEMENTED(FATAL);
  return 0;
#else
  // As in artInvokeCommon: code pointer in the high 32 bits, method pointer in the low 32 bits.
  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
  uint64_t result = ((code_uint << 32) | method_uint);
  return result;
#endif
}

}  // namespace art