quick_trampoline_entrypoints.cc revision 3d21bdf8894e780d349c481e5c9e29fe1556051c
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "debugger.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // | :          |
  // | X19        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && !defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = true;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 16;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | F7         |    f_arg7
  // | F6         |    f_arg6
  // | F5         |    f_arg5
  // | F4         |    f_arg4
  // | F3         |    f_arg3
  // | F2         |    f_arg2
  // | F1         |    f_arg1
  // | F0         |    f_arg0
  // | A7         |    arg7
  // | A6         |    arg6
  // | A5         |    arg5
  // | A4         |    arg4
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // |            |    padding
  // | A0/Method* |  <- sp
  // NOTE: for Mips64, when A0 is skipped, F0 is also skipped.
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  // GPR and FPR register assignments for Mips64 are interleaved: advancing one kind of
  // register also advances the other, so VisitArguments() moves both indices together
  // (see kGprFprLockstep below).
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = true;

  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | XMM3        |    float arg 4
  // | XMM2        |    float arg 3
  // | XMM1        |    float arg 2
  // | XMM0        |    float arg 1
  // | EAX/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
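  // The switch above maps argument order to spill-slot order: per the diagram, the callee
  // saves RBX/RBP are interleaved between the argument registers, so counting 8-byte slots
  // up from Gpr1Offset gives RCX(0), RDX(1), RBX(2), RBP(3), RSI(4), R8(5), R9(6). E.g.,
  // GprIndexToGprOffset(0), the first managed argument (RSI), returns 4 * 8 = 32.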
#else
#error "Unsupported architecture"
#endif

 public:
  // Special handling for proxy methods. Proxy methods are instance methods so the
  // 'this' object is the 1st argument. They also have the same frame layout as the
  // kRefsAndArgs runtime method. Since 'this' is a reference, it is located in the
  // 1st GPR.
  static mirror::Object* GetProxyThisObject(ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK((*sp)->IsProxyMethod());
    CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
    CHECK_GT(kNumQuickGprArgs, 0u);
    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
        GprIndexToGprOffset(kThisGprIndex);
    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
  }
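  // Worked example (x86, using the constants above): Gpr1Offset is 4 + 4*8 = 36, since the
  // first GPR arg (ECX) sits above EAX/Method* and the four XMM spills, and
  // GprIndexToGprOffset(0) is 0. GetProxyThisObject() therefore reads the StackReference
  // for 'this' at sp + 36.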
  static ArtMethod* GetCallingMethod(ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) +
        kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<ArtMethod**>(previous_sp);
  }

  // For the given quick RefsAndArgs frame, return the caller's PC.
  static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + sizeof(ArtMethod*)),  // Skip ArtMethod*.
      gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
      cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, we want to assume that counters (fpr_double_index_) are even
    // iff the next register is even.
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
    return *reinterpret_cast<uint64_t*>(stack_args_
        + stack_index_ * kBytesStackArgLocation);
  }

  void IncGprIndex() {
    gpr_index_++;
    if (kGprFprLockstep) {
      fpr_index_++;
    }
  }

  void IncFprIndex() {
    fpr_index_++;
    if (kGprFprLockstep) {
      gpr_index_++;
    }
  }
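  // Split-long example (soft-float ARM case, kNumQuickGprArgs == 3, hypothetical shorty
  // "VIJ" on an instance method): 'this' takes the 1st GPR and the int the 2nd, so the long
  // starts in the last GPR and is_split_long_or_double_ becomes true
  // (kSplitPairAcrossRegisterAndStack). ReadSplitLongParam() then reads the whole value from
  // the caller's out area, where both halves are always available.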
  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the first method's argument
    // (b) whatever the argument type it is, the 'stack_index_' should
    //     be moved forward along with every visiting.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle the "this" argument.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        IncGprIndex();
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            IncGprIndex();
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
              // Currently, this is only for ARM, where the first available parameter register
              // is R1. So we skip it, and use R2 instead.
              IncGprIndex();
            }
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
              // We don't want to split this. Pass over this register.
              gpr_index_++;
              is_split_long_or_double_ = false;
            }
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  IncGprIndex();
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  IncFprIndex();
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }
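  // Back-fill example (hard-float ARM, kQuickDoubleRegAlignedFloatBackFilled, hypothetical
  // shorty "VFDF"): the 1st float takes S0, the double must be pair-aligned so it takes
  // D1 (S2/S3) and fpr_double_index_ advances to 4, and the 2nd float back-fills the hole
  // at S1 via fpr_index_. This is why fpr_index_ can lag behind fpr_double_index_.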
 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  uint8_t* const gpr_args_;    // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;    // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;         // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register,
  // indexed in terms of singles; may be behind fpr_index_.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;       // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// makes it possible to use the QuickArgumentVisitor constants without moving all the code into
// its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return QuickArgumentVisitor::GetProxyThisObject(sp);
}

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}
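// Note: for kPrimLong and kPrimDouble, Visit() bumps cur_reg_ twice (once in the case, once
// at the end), matching the dex convention that wide values occupy a vreg pair. E.g., for a
// static method with hypothetical shorty "VJI" and first_arg_reg == 1, the long fills v1/v2
// and the int lands in v3.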
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    // No last shadow coming from quick.
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    uint32_t shorty_len = 0;
    auto* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
    const char* shorty = non_proxy_method->GetShorty(&shorty_len);
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    const bool needs_initialization =
        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (needs_initialization) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }
    JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    // Request a stack deoptimization if needed.
    ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
    if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
      self->SetException(Thread::GetDeoptimizationException());
      self->SetDeoptimizationReturnValue(result);
    }

    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}
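// Shadow-frame sizing above follows the dex convention that incoming arguments occupy the
// highest registers. E.g., a code item with registers_size_ == 5 and ins_size_ == 2 yields a
// shadow frame v0-v4 whose arguments are copied into v3 and v4 (first_arg_reg == 3).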
// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fix up any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}
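// Why the fixup is needed: each jobject created in Visit() is a GC root, but the raw stack
// slot the reference came from is not; a moving GC during the invocation may relocate the
// object. FixupReferences() writes the (possibly moved) object back into the quick frame and
// then releases the local reference.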
// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create a local ref. copy of the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
                                       << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
  DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  self->EndAssertNoThreadSuspension(old_cause);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(
      mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}
void RememberForGcArgumentVisitor::FixupReferences() {
  // Fix up any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  InvokeType invoke_type;
  MethodReference called_method(nullptr, 0);
  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
  if (!called_method_known_on_entry) {
    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    const DexFile::CodeItem* code;
    called_method.dex_file = caller->GetDexFile();
    code = caller->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
        UNREACHABLE();
    }
    called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
  } else {
    invoke_type = kStatic;
    called_method.dex_file = called->GetDexFile();
    called_method.dex_method_index = called->GetDexMethodIndex();
  }
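  // Decoding example: a 35c-format instruction such as "invoke-virtual {v1, v2}, LFoo;->bar(I)V"
  // carries its method index in VRegB_35c(), while the /range variants (3rc format) carry it
  // in VRegB_3rc(). The switch above recovers only the invoke type; the method index read here
  // is what gets resolved below.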
  uint32_t shorty_len;
  const char* shorty =
      called_method.dex_file->GetMethodShorty(
          called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (!called_method_known_on_entry) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
    called = linker->ResolveMethod(self, called_method.dex_method_index, caller, invoke_type);
  }
  const void* code = nullptr;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
                               << PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method, avoiding dirtying the dex cache if possible.
      // Note, called_method.dex_method_index references the dex method before the
      // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile, which only cares
      // about the name and signature.
      uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
      if (!called->HasSameDexCacheResolvedMethods(caller)) {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(!called_method_known_on_entry);
        DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
        const DexFile* caller_dex_file = called_method.dex_file;
        uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
        update_dex_cache_method_index =
            called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
                                                     caller_method_name_and_sig_index);
      }
      if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
          (caller->GetDexCacheResolvedMethod(
              update_dex_cache_method_index, sizeof(void*)) != called)) {
        caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
      }
    } else if (invoke_type == kStatic) {
      const auto called_dex_method_idx = called->GetDexMethodIndex();
      // For static invokes, we may dispatch to the static method in the superclass but resolve
      // using the subclass. To prevent getting slow paths on each invoke, we force set the
      // resolved method for the super class dex method index if we are in the same dex file.
      // b/19175856
      if (called->GetDexFile() == called_method.dex_file &&
          called_method.dex_method_index != called_dex_method_idx) {
        called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
      }
    }

    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(soa.Self(), called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
        // If the caller is deoptimized (by a breakpoint, for example), we have to
        // continue its execution with interpreter when returning from the called
        // method. Because we do not want to execute the called method with the
        // interpreter, we wrap its execution into the instrumentation stubs.
        // When the called method returns, it will execute the instrumentation
        // exit hook that will determine the need for the interpreter with a call
        // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
        // it is needed.
        code = GetQuickInstrumentationEntryPoint();
      } else {
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else if (called_class->IsInitializing()) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fix up any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;

  return code;
}
/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. It also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float target, Arm, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code is
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
 *                       padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_
 *                                            be called with nullptr, as this might be important
 *                                            for null initialization. Must return the jobject,
 *                                            that is, the reference to the entry in the
 *                                            HandleScope (nullptr if necessary).
 *
 */
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNativeSoftFloatAbi = true;  // Floats are passed in GPRs here.
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = true;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__mips__) && defined(__LP64__)
  // Let the code prepare GPRs only and we will load the FPRs with the same data.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 8;
  static constexpr size_t kNumNativeFprArgs = 0;

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 is not using regs anyway.
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() const {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushGpr(val);
      }
    } else {
      stack_entries_++;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushStack(val);
      }
      gpr_index_ = 0;
    }
  }
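  // Example of the down-counting parity trick (ARM, kNumNativeGprArgs == 4): after one
  // AdvanceInt(), gpr_index_ is 3 (odd), meaning the next free register r1 is odd-numbered.
  // A following AdvanceLong() then sees LongGprNeedsPadding() == true, pushes one pad word to
  // skip r1, and places the long in the aligned r2/r3 pair.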
  bool HaveLongGpr() const {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs alignment
        (gpr_index_ & 1) == 1;             // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;         // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() const {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<uint32_t, float>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiFPRegistersWidened) {
            PushFpr8(bit_cast<uint64_t, double>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
        } else {
          PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
        }
        fpr_index_ = 0;
      }
    }
  }
  bool HaveDoubleFpr() const {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
        kAlignDoubleOnStack &&               // and when it needs alignment
        (fpr_index_ & 1) == 1;               // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
        kAlignDoubleOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;           // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t GetStackEntries() const {
    return stack_entries_;
  }

  uint32_t GetNumberOfUsedGprs() const {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t GetNumberOfUsedFprs() const {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* const delegate_;       // What Push implementation gets called.
};
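// A sketch of how the state machine is driven (see ComputeNativeCallFrameSize::Walk below):
// the owner constructs BuildNativeCallFrameStateMachine<Delegate> sm(this), feeds it
// AdvanceInt(), AdvanceFloat(), AdvanceLong(), ... per shorty character, and the machine
// calls back into the delegate's PushGpr()/PushFpr4()/PushFpr8()/PushStack()/PushHandle()
// as registers fill up.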
// Computes the sizes of register stacks and call stack area. Handling of references can be
// extended in subclasses.
//
// To handle native pointers, use "L" in the shorty for an object reference, which simulates
// them with handles.
class ComputeNativeCallFrameSize {
 public:
  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}

  virtual ~ComputeNativeCallFrameSize() {}

  uint32_t GetStackSize() const {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  uint8_t* LayoutCallStack(uint8_t* sp8) const {
    sp8 -= GetStackSize();
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
      const {
    // Assumption is OK right now, as we have soft-float arm.
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) const {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put fprs and gprs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    UNUSED(sm);
  }

  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          // TODO: fix abuse of mirror types.
          sm.AdvanceHandleScope(
              reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
          UNREACHABLE();
      }
    }

    num_stack_entries_ = sm.GetStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr4(float /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr8(uint64_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushStack(uintptr_t /* val */) {
    // Counting is already done in the state machine.
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};
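// Sizing and filling are two separate passes: ComputeNativeCallFrameSize only counts (its
// Push* methods are no-ops) and the resulting num_stack_entries_ drives LayoutCallStack().
// E.g., two overflow arguments on a 64-bit target give GetStackSize() == 16 before sp is
// rounded down to kStackAlignment.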
1488  void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
1489      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1490    ArtMethod* method = **m;
1491
1492    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
1493
1494    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
1495
1496    // First, fix up the layout of the callee-save frame.
1497    // We have to squeeze in the HandleScope, and relocate the method pointer.
1498
1499    // "Free" the slot for the method.
1500    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
1501
1502    // Under the callee saves put handle scope and new method stack reference.
1503    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
1504    size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);
1505
1506    sp8 -= scope_and_method;
1507    // Align by kStackAlignment.
1508    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1509
1510    uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
1511    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
1512                                        num_handle_scope_references_);
1513
1514    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
1515    uint8_t* method_pointer = sp8;
1516    auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
1517    *new_method_ref = method;
1518    *m = new_method_ref;
1519  }
1520
1521  // Adds space for the cookie. Note: may leave stack unaligned.
1522  void LayoutCookie(uint8_t** sp) const {
1523    // Reference cookie and padding
1524    *sp -= 8;
1525  }
1526
1527  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
1528  // Returns the new bottom. Note: this may be unaligned.
1529  uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
1530      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1531    // First, fix up the layout of the callee-save frame.
1532    // We have to squeeze in the HandleScope, and relocate the method pointer.
1533    LayoutCalleeSaveFrame(self, m, sp, handle_scope);
1534
1535    // The bottom of the callee-save frame is now where the method is, *m.
1536    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
1537
1538    // Add space for cookie.
1539    LayoutCookie(&sp8);
1540
1541    return sp8;
1542  }
1543
1544  // WARNING: After this, *sp won't be pointing to the method anymore!
1545  uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
1546                         HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
1547                         uint32_t** start_fpr)
1548      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1549    Walk(shorty, shorty_len);
1550
1551    // JNI part.
1552    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
1553
1554    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
1555
1556    // Return the new bottom.
1557    return sp8;
1558  }
1559
1560  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
1561
1562  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
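  // For example, for an instance method with shorty "ILD" (int return; reference and double
  // arguments), the combined walk advances: pointer (JNIEnv*), handle scope entry (this),
  // handle scope entry (the "L" argument), then double.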
1563  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
1564      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1565
1566 private:
1567  uint32_t num_handle_scope_references_;
1568};
1569
1570uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1571  num_handle_scope_references_++;
1572  return reinterpret_cast<uintptr_t>(nullptr);
1573}
1574
1575void ComputeGenericJniFrameSize::WalkHeader(
1576    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1577  // JNIEnv
1578  sm->AdvancePointer(nullptr);
1579
1580  // Class object or this as first argument
1581  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1582}
1583
1584// Class to push values to three separate regions. Used to fill the native call part. Adheres to
1585// the template requirements of BuildNativeCallFrameStateMachine.
1586class FillNativeCall {
1587 public:
1588  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1589      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1590
1591  virtual ~FillNativeCall() {}
1592
1593  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1594    cur_gpr_reg_ = gpr_regs;
1595    cur_fpr_reg_ = fpr_regs;
1596    cur_stack_arg_ = stack_args;
1597  }
1598
1599  void PushGpr(uintptr_t val) {
1600    *cur_gpr_reg_ = val;
1601    cur_gpr_reg_++;
1602  }
1603
1604  void PushFpr4(float val) {
1605    *cur_fpr_reg_ = bit_cast<uint32_t, float>(val);  // Store the raw bits; a numeric conversion would mangle the value.
1606    cur_fpr_reg_++;
1607  }
1608
1609  void PushFpr8(uint64_t val) {
1610    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1611    *tmp = val;
1612    cur_fpr_reg_ += 2;
1613  }
1614
1615  void PushStack(uintptr_t val) {
1616    *cur_stack_arg_ = val;
1617    cur_stack_arg_++;
1618  }
1619
1620  virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1621    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1622    UNREACHABLE();
1623  }
1624
1625 private:
1626  uintptr_t* cur_gpr_reg_;
1627  uint32_t* cur_fpr_reg_;
1628  uintptr_t* cur_stack_arg_;
1629};
1630
1631// Visits arguments on the stack, placing them into a region lower down the stack for the benefit
1632// of transitioning into native code.
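// In outline, the intended use (see artQuickGenericJniTrampoline below) is:
//
//   BuildGenericJniFrameVisitor visitor(self, is_static, shorty, shorty_len, &sp);
//   visitor.VisitArguments();           // Fill the GPR/FPR/stack regions and the handle scope.
//   visitor.FinalizeHandleScope(self);  // Null out unused slots and install the scope.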
1633class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
1634 public:
1635  BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
1636                              ArtMethod*** sp)
1637     : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
1638       jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
1639    ComputeGenericJniFrameSize fsc;
1640    uintptr_t* start_gpr_reg;
1641    uint32_t* start_fpr_reg;
1642    uintptr_t* start_stack_arg;
1643    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
1644                                             &handle_scope_,
1645                                             &start_stack_arg,
1646                                             &start_gpr_reg, &start_fpr_reg);
1647
1648    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
1649
1650    // jni environment is always first argument
1651    sm_.AdvancePointer(self->GetJniEnv());
1652
1653    if (is_static) {
1654      sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
1655    }
1656  }
1657
1658  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
1659
1660  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1661
1662  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
1663      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1664    return handle_scope_->GetHandle(0).GetReference();
1665  }
1666
1667  jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1668    return handle_scope_->GetHandle(0).ToJObject();
1669  }
1670
1671  void* GetBottomOfUsedArea() const {
1672    return bottom_of_used_area_;
1673  }
1674
1675 private:
1676  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
1677  class FillJniCall FINAL : public FillNativeCall {
1678   public:
1679    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
1680                HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
1681                                             handle_scope_(handle_scope), cur_entry_(0) {}
1682
1683    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
1684
1685    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
1686      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
1687      handle_scope_ = scope;
1688      cur_entry_ = 0U;
1689    }
1690
1691    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1692      // Initialize padding entries.
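      // The scope was sized up front (during Walk()), so it can contain more slots than the fill
      // pass actually populated. The GC scans all NumberOfReferences() slots, so the leftover
      // padding slots must be set to null rather than left holding stale stack memory.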
1693      size_t expected_slots = handle_scope_->NumberOfReferences();
1694      while (cur_entry_ < expected_slots) {
1695        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
1696      }
1697      DCHECK_NE(cur_entry_, 0U);
1698    }
1699
1700   private:
1701    HandleScope* handle_scope_;
1702    size_t cur_entry_;
1703  };
1704
1705  HandleScope* handle_scope_;
1706  FillJniCall jni_call_;
1707  void* bottom_of_used_area_;
1708
1709  BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1710
1711  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1712};
1713
1714uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
1715  uintptr_t tmp;
1716  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
1717  h.Assign(ref);
1718  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
1719  cur_entry_++;
1720  return tmp;
1721}
1722
1723void BuildGenericJniFrameVisitor::Visit() {
1724  Primitive::Type type = GetParamPrimitiveType();
1725  switch (type) {
1726    case Primitive::kPrimLong: {
1727      jlong long_arg;
1728      if (IsSplitLongOrDouble()) {
1729        long_arg = ReadSplitLongParam();
1730      } else {
1731        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1732      }
1733      sm_.AdvanceLong(long_arg);
1734      break;
1735    }
1736    case Primitive::kPrimDouble: {
1737      uint64_t double_arg;
1738      if (IsSplitLongOrDouble()) {
1739        // Read the bits into a uint64_t so that we don't convert the value to a double.
1740        double_arg = ReadSplitLongParam();
1741      } else {
1742        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1743      }
1744      sm_.AdvanceDouble(double_arg);
1745      break;
1746    }
1747    case Primitive::kPrimNot: {
1748      StackReference<mirror::Object>* stack_ref =
1749          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1750      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1751      break;
1752    }
1753    case Primitive::kPrimFloat:
1754      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1755      break;
1756    case Primitive::kPrimBoolean:  // Fall-through.
1757    case Primitive::kPrimByte:     // Fall-through.
1758    case Primitive::kPrimChar:     // Fall-through.
1759    case Primitive::kPrimShort:    // Fall-through.
1760    case Primitive::kPrimInt:      // Fall-through.
1761      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1762      break;
1763    case Primitive::kPrimVoid:
1764      LOG(FATAL) << "UNREACHABLE";
1765      UNREACHABLE();
1766  }
1767}
1768
1769void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1770  // Clear out the rest of the scope.
1771  jni_call_.ResetRemainingScopeSlots();
1772  // Install HandleScope.
1773  self->PushHandleScope(handle_scope_);
1774}
1775
1776#if defined(__arm__) || defined(__aarch64__)
1777extern "C" void* artFindNativeMethod();
1778#else
1779extern "C" void* artFindNativeMethod(Thread* self);
1780#endif
1781
1782uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1783  if (lock != nullptr) {
1784    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1785  } else {
1786    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1787  }
1788}
1789
1790void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1791  if (lock != nullptr) {
1792    JniMethodEndSynchronized(cookie, lock, self);
1793  } else {
1794    JniMethodEnd(cookie, self);
1795  }
1796}
1797
1798/*
1799 * Initializes an alloca region assumed to be directly below sp for a native call:
1800 * Creates a HandleScope and a call stack, and fills a mini-stack with values to be pushed to registers.
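 *
 * As an illustrative example, on arm64 an instance method with shorty "IDL" -- jint f(jdouble,
 * jobject) in JNI terms -- is marshalled as: JNIEnv* in x0, the 'this' handle in x1, the double
 * in d0, and the jobject's handle in x2, with no out args needed on the stack.
 *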
1801 * The final element on the stack is a pointer to the native code.
1802 *
1803 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1804 * We need to fix this, as the handle scope needs to go into the callee-save frame.
1805 *
1806 * The return of this function denotes:
1807 * 1) Success: a two-word value carrying the bottom of the used alloca area and the native code to invoke.
1808 * 2) Failure: the two-word failure value, with an exception pending on the thread.
1809 */
1810extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
1811    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1812  ArtMethod* called = *sp;
1813  DCHECK(called->IsNative()) << PrettyMethod(called, true);
1814  uint32_t shorty_len = 0;
1815  const char* shorty = called->GetShorty(&shorty_len);
1816
1817  // Run the visitor and update sp.
1818  BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
1819  visitor.VisitArguments();
1820  visitor.FinalizeHandleScope(self);
1821
1822  // Fix up the managed stack in the Thread, so that stack walks see the relocated top frame.
1823  self->SetTopOfStack(sp);
1824
1825  self->VerifyStack();
1826
1827  // Start JNI, save the cookie.
1828  uint32_t cookie;
1829  if (called->IsSynchronized()) {
1830    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
1831    if (self->IsExceptionPending()) {
1832      self->PopHandleScope();
1833      // The two-word failure value denotes an error.
1834      return GetTwoWordFailureValue();
1835    }
1836  } else {
1837    cookie = JniMethodStart(self);
1838  }
1839  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1840  *(sp32 - 1) = cookie;
1841
1842  // Retrieve the stored native code.
1843  void* nativeCode = called->GetEntryPointFromJni();
1844
1845  // There are two cases for the content of nativeCode:
1846  // 1) Pointer to the native function.
1847  // 2) Pointer to the trampoline for native code binding.
1848  // In the second case, we need to execute the binding and continue with the actual native function
1849  // pointer.
1850  DCHECK(nativeCode != nullptr);
1851  if (nativeCode == GetJniDlsymLookupStub()) {
1852#if defined(__arm__) || defined(__aarch64__)
1853    nativeCode = artFindNativeMethod();
1854#else
1855    nativeCode = artFindNativeMethod(self);
1856#endif
1857
1858    if (nativeCode == nullptr) {
1859      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.
1860
1861      // End JNI, as the assembly will move to deliver the exception.
1862      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
1863      if (shorty[0] == 'L') {
1864        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
1865      } else {
1866        artQuickGenericJniEndJNINonRef(self, cookie, lock);
1867      }
1868
1869      return GetTwoWordFailureValue();
1870    }
1871    // Note that the native code pointer will be automatically set by artFindNativeMethod().
1872  }
1873
1874  // Return the native code address (lo) and the bottom of the alloca region (hi).
1875  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
1876                                reinterpret_cast<uintptr_t>(nativeCode));
1877}
1878
1879/*
1880 * Called after the native JNI code returns. Responsible for cleanup (handle scope, saved state)
1881 * and, for synchronized methods, unlocking.
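 *
 * For example, for a synchronized method returning an object, the code below reads the saved
 * cookie, calls JniMethodEndWithReferenceSynchronized(result.l, cookie, lock, self) to decode
 * the returned reference and release the monitor, and passes the decoded value back to the stub.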
1882 */
1883extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
1884    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1885  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
1886  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1887  ArtMethod* called = *sp;
1888  uint32_t cookie = *(sp32 - 1);
1889
1890  jobject lock = nullptr;
1891  if (called->IsSynchronized()) {
1892    HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
1893                                                        + sizeof(*sp));
1894    lock = table->GetHandle(0).ToJObject();
1895  }
1896
1897  char return_shorty_char = called->GetShorty()[0];
1898
1899  if (return_shorty_char == 'L') {
1900    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
1901  } else {
1902    artQuickGenericJniEndJNINonRef(self, cookie, lock);
1903
1904    switch (return_shorty_char) {
1905      case 'F': {
1906        if (kRuntimeISA == kX86) {
1907          // Convert the result back to float.
1908          double d = bit_cast<double, uint64_t>(result_f);
1909          return bit_cast<uint32_t, float>(static_cast<float>(d));
1910        } else {
1911          return result_f;
1912        }
1913      }
1914      case 'D':
1915        return result_f;
1916      case 'Z':
1917        return result.z;
1918      case 'B':
1919        return result.b;
1920      case 'C':
1921        return result.c;
1922      case 'S':
1923        return result.s;
1924      case 'I':
1925        return result.i;
1926      case 'J':
1927        return result.j;
1928      case 'V':
1929        return 0;
1930      default:
1931        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
1932        return 0;
1933    }
1934  }
1935}
1936
1937// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
1938// for the method pointer.
1939//
1940// It is valid to use this, as at the usage points here (returns from C functions) we are assumed
1941// to hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
1942
1943template<InvokeType type, bool access_check>
1944static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1945                                     ArtMethod* caller_method, Thread* self, ArtMethod** sp);
1946
1947template<InvokeType type, bool access_check>
1948static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
1949                                     ArtMethod* caller_method, Thread* self, ArtMethod** sp) {
1950  ScopedQuickEntrypointChecks sqec(self);
1951  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
1952  ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
1953  if (UNLIKELY(method == nullptr)) {
1954    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1955    uint32_t shorty_len;
1956    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
1957    {
1958      // Remember the args in case a GC happens in FindMethodFromCode.
1959      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
1960      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
1961      visitor.VisitArguments();
1962      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
1963                                                      self);
1964      visitor.FixupReferences();
1965    }
1966
1967    if (UNLIKELY(method == nullptr)) {
1968      CHECK(self->IsExceptionPending());
1969      return GetTwoWordFailureValue();  // Failure.
1970    }
1971  }
1972  DCHECK(!self->IsExceptionPending());
1973  const void* code = method->GetEntryPointFromQuickCompiledCode();
1974
1975  // When we return, the caller will branch to this address, so it had better not be 0!
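  // (The assembly stub that receives this TwoWordReturn installs 'method' in the method register
  // and branches to 'code', so a null entry point would mean branching to address zero.)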
1976  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
1977                          << " location: "
1978                          << method->GetDexFile()->GetLocation();
1979
1980  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
1981                                reinterpret_cast<uintptr_t>(method));
1982}
1983
1984// Explicit artInvokeCommon template function declarations to please analysis tool.
1985#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
1986  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
1987  TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
1988                                                    mirror::Object* this_object,                \
1989                                                    ArtMethod* caller_method,                   \
1990                                                    Thread* self,                               \
1991                                                    ArtMethod** sp)                             \
1992
1993EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
1994EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
1995EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
1996EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
1997EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
1998EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
1999EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
2000EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
2001EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
2002EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
2003#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2004
2005// See comments in runtime_support_asm.S
2006extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2007    uint32_t method_idx, mirror::Object* this_object,
2008    ArtMethod* caller_method, Thread* self, ArtMethod** sp)
2009    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2010  return artInvokeCommon<kInterface, true>(method_idx, this_object,
2011                                           caller_method, self, sp);
2012}
2013
2014extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2015    uint32_t method_idx, mirror::Object* this_object,
2016    ArtMethod* caller_method, Thread* self, ArtMethod** sp)
2017    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2018  return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
2019                                        self, sp);
2020}
2021
2022extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2023    uint32_t method_idx, mirror::Object* this_object,
2024    ArtMethod* caller_method, Thread* self, ArtMethod** sp)
2025    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2026  return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
2027                                        self, sp);
2028}
2029
2030extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2031    uint32_t method_idx, mirror::Object* this_object,
2032    ArtMethod* caller_method, Thread* self, ArtMethod** sp)
2033    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2034  return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
2035                                       self, sp);
2036}
2037
2038extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2039    uint32_t method_idx, mirror::Object* this_object,
2040    ArtMethod* caller_method, Thread* self, ArtMethod** sp)
2041    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2042  return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
2043                                         self, sp);
2044}
2045
2046// Determine target of interface dispatch. This object is known non-null.
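// Two cases are handled below: if interface_method is already resolved (it has a real dex method
// index), the target is looked up on the receiver's class via FindVirtualMethodForInterface;
// otherwise interface_method is the runtime's resolution method, and we decode the
// invoke-interface instruction at the caller's PC to recover the dex method index before
// resolving the callee.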
2047extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
2048                                                      mirror::Object* this_object,
2049                                                      ArtMethod* caller_method,
2050                                                      Thread* self,
2051                                                      ArtMethod** sp)
2052    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2053  ScopedQuickEntrypointChecks sqec(self);
2054  ArtMethod* method;
2055  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
2056    method = this_object->GetClass()->FindVirtualMethodForInterface(
2057        interface_method, sizeof(void*));
2058    if (UNLIKELY(method == nullptr)) {
2059      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2060          interface_method, this_object, caller_method);
2061      return GetTwoWordFailureValue();  // Failure.
2062    }
2063  } else {
2064    DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
2065
2066    // Find the caller PC.
2067    constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs);
2068    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
2069
2070    // Map the caller PC to a dex PC.
2071    uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
2072    const DexFile::CodeItem* code = caller_method->GetCodeItem();
2073    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
2074    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
2075    Instruction::Code instr_code = instr->Opcode();
2076    CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2077          instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2078        << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
2079    uint32_t dex_method_idx;
2080    if (instr_code == Instruction::INVOKE_INTERFACE) {
2081      dex_method_idx = instr->VRegB_35c();
2082    } else {
2083      DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2084      dex_method_idx = instr->VRegB_3rc();
2085    }
2086
2087    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
2088        ->GetDexFile();
2089    uint32_t shorty_len;
2090    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
2091                                                   &shorty_len);
2092    {
2093      // Remember the args in case a GC happens in FindMethodFromCode.
2094      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2095      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
2096      visitor.VisitArguments();
2097      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
2098                                                     self);
2099      visitor.FixupReferences();
2100    }
2101
2102    if (UNLIKELY(method == nullptr)) {
2103      CHECK(self->IsExceptionPending());
2104      return GetTwoWordFailureValue();  // Failure.
2105    }
2106  }
2107  const void* code = method->GetEntryPointFromQuickCompiledCode();
2108
2109  // When we return, the caller will branch to this address, so it had better not be 0!
2110  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
2111                          << " location: " << method->GetDexFile()->GetLocation();
2112
2113  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2114                                reinterpret_cast<uintptr_t>(method));
2115}
2116
2117}  // namespace art
2118