quick_trampoline_entrypoints.cc revision 1f2d3ba6af52cf6f566deb38b7e07735c9a08fb6
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "debugger.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // | :          |
  // | X20        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && !defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | A0/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = true;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 16;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
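  // Illustrative note (not in the original source): with kSplitPairAcrossRegisterAndStack set
  // above and only three argument GPRs, a long argument can end up half in A3 and half in the
  // caller's out area; such a value is recovered via ReadSplitLongParam() below, which reads the
  // whole pair from the stack copy of the arguments.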
#elif defined(__mips__) && defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | F7         |    f_arg7
  // | F6         |    f_arg6
  // | F5         |    f_arg5
  // | F4         |    f_arg4
  // | F3         |    f_arg3
  // | F2         |    f_arg2
  // | F1         |    f_arg1
  // | F0         |    f_arg0
  // | A7         |    arg7
  // | A6         |    arg6
  // | A5         |    arg5
  // | A4         |    arg4
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // |            |    padding
  // | A0/Method* |  <- sp
  // NOTE: for Mips64, when A0 is skipped, F0 is also skipped.
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  // GPR and FPR argument registers are assigned in lockstep on Mips64 (see kGprFprLockstep
  // below): skipping a GPR argument slot also skips the corresponding FPR slot and vice versa,
  // which is how VisitArguments() models the interleaved assignment.
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = true;

  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | XMM3        |    float arg 4
  // | XMM2        |    float arg 3
  // | XMM1        |    float arg 2
  // | XMM0        |    float arg 1
  // | EAX/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
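  // Note (illustrative, added for clarity): the switch below maps argument order (RSI, RDX,
  // RCX, R8, R9 -- arg1 through arg5 in the diagram above) onto the spill-slot order in the
  // frame (RCX, RDX, RBX, RBP, RSI, R8, R9 above XMM7), which is why the returned offsets are
  // not monotonic in gpr_index.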
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  // Special handling for proxy methods. Proxy methods are instance methods so the
  // 'this' object is the 1st argument. They also have the same frame layout as the
  // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
  // 1st GPR.
  static mirror::Object* GetProxyThisObject(ArtMethod** sp)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK((*sp)->IsProxyMethod());
    CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
    CHECK_GT(kNumQuickGprArgs, 0u);
    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
        GprIndexToGprOffset(kThisGprIndex);
    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
  }

  static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
  }

  static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* previous_sp =
        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<ArtMethod**>(previous_sp);
  }

  static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
    ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    ArtMethod* outer_method = *caller_sp;
    uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    uintptr_t outer_pc_offset = outer_method->NativeQuickPcOffset(outer_pc);

    if (outer_method->IsOptimized(sizeof(void*))) {
      CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
      StackMapEncoding encoding = code_info.ExtractEncoding();
      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
      DCHECK(stack_map.IsValid());
      if (stack_map.HasInlineInfo(encoding)) {
        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
        return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
      } else {
        return stack_map.GetDexPc(encoding);
      }
    } else {
      return outer_method->ToDexPc(outer_pc);
    }
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }
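  // Worked example of the frame arithmetic above (illustrative, in terms of the constants
  // defined for the current ISA):
  //   reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
  //       -> the caller's ArtMethod* slot (what GetOuterMethod reads);
  //   reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset
  //       -> the spilled return address (what GetCallingPc reads).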
  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + sizeof(ArtMethod*)),  // Skip ArtMethod*.
      gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
      cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, we want to assume that counters (fpr_double_index_) are even iff
    // the next register is even.
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
    return *reinterpret_cast<uint64_t*>(stack_args_
        + stack_index_ * kBytesStackArgLocation);
  }

  void IncGprIndex() {
    gpr_index_++;
    if (kGprFprLockstep) {
      fpr_index_++;
    }
  }

  void IncFprIndex() {
    fpr_index_++;
    if (kGprFprLockstep) {
      gpr_index_++;
    }
  }

  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the first method's argument
    // (b) whatever the argument type is, 'stack_index_' must be advanced
    //     for every argument visited.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle this.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        IncGprIndex();
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            IncGprIndex();
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
              // Currently, this is only for ARM, where the first available parameter register
              // is R1. So we skip it, and use R2 instead.
              IncGprIndex();
            }
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
              // We don't want to split this. Pass over this register.
              gpr_index_++;
              is_split_long_or_double_ = false;
            }
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  IncGprIndex();
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  IncFprIndex();
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
  // terms of singles, may be behind fpr_index.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};
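// Illustrative sketch (not part of the original file): the minimal shape of a
// QuickArgumentVisitor subclass. It only counts reference parameters, but it shows the intended
// protocol -- construct with the frame and shorty, call VisitArguments(), and receive one
// Visit() callback per parameter with the accessors primed for that parameter.
class CountReferenceArgsVisitor FINAL : public QuickArgumentVisitor {
 public:
  CountReferenceArgsVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len)
      : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), reference_count_(0) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
    if (IsParamAReference()) {
      ++reference_count_;  // 'this' (for instance methods) is visited as a reference too.
    }
  }

  size_t GetReferenceCount() const {
    return reference_count_;
  }

 private:
  size_t reference_count_;

  DISALLOW_COPY_AND_ASSIGN(CountReferenceArgsVisitor);
};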
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows the use of the QuickArgumentVisitor constants without moving all the code into its own
// module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return QuickArgumentVisitor::GetProxyThisObject(sp);
}

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  } else {
    DCHECK(!method->IsNative()) << PrettyMethod(method);
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(method);
    uint16_t num_regs = code_item->registers_size_;
    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    // No last shadow coming from quick.
    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    uint32_t shorty_len = 0;
    auto* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
    const char* shorty = non_proxy_method->GetShorty(&shorty_len);
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    const bool needs_initialization =
        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (needs_initialization) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }
    JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    // Request a stack deoptimization if needed.
    ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
    if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
      self->SetException(Thread::GetDeoptimizationException());
      self->SetDeoptimizationReturnValue(result, shorty[0] == 'L');
    }

    // No need to restore the args since the method has already been run by the interpreter.
    return result.GetJ();
  }
}
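// Worked example for the shadow-frame layout used above (illustrative): Dalvik methods keep
// their incoming arguments in the highest vregs, so a method with registers_size_ = 6 and
// ins_size_ = 3 has locals in v0..v2 and arguments in v3..v5. The bridge thus computes
// first_arg_reg = 6 - 3 = 3 and the visitor copies the incoming register/stack arguments into
// vregs starting at v3.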
// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
      << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place arguments into the args vector and remove the receiver.
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
                                       << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
  DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  self->EndAssertNoThreadSuspension(old_cause);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(
      mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // The resolution trampoline stashes the resolved method into the callee-save frame to transport
  // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
  // does not have the same stack layout as the callee-save method).
  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  InvokeType invoke_type;
  MethodReference called_method(nullptr, 0);
  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
  ArtMethod* caller = nullptr;
  if (!called_method_known_on_entry) {
    caller = QuickArgumentVisitor::GetCallingMethod(sp);
    uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
    const DexFile::CodeItem* code;
    called_method.dex_file = caller->GetDexFile();
    code = caller->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
        UNREACHABLE();
    }
    called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
  } else {
    invoke_type = kStatic;
    called_method.dex_file = called->GetDexFile();
    called_method.dex_method_index = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      called_method.dex_file->GetMethodShorty(
          called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (!called_method_known_on_entry) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
    called = linker->ResolveMethod(self, called_method.dex_method_index, caller, invoke_type);
  }
  const void* code = nullptr;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
                               << PrettyTypeOf(receiver) << " "
                               << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
      // of the sharpened method avoiding dirtying the dex cache if possible.
      // Note, called_method.dex_method_index references the dex method before the
      // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares
      // about the name and signature.
      uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
      if (!called->HasSameDexCacheResolvedMethods(caller)) {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(!called_method_known_on_entry);
        DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
        const DexFile* caller_dex_file = called_method.dex_file;
        uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
        update_dex_cache_method_index =
            called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
                                                     caller_method_name_and_sig_index);
      }
      if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
          (caller->GetDexCacheResolvedMethod(
              update_dex_cache_method_index, sizeof(void*)) != called)) {
        caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
      }
    } else if (invoke_type == kStatic) {
      const auto called_dex_method_idx = called->GetDexMethodIndex();
      // For static invokes, we may dispatch to the static method in the superclass but resolve
      // using the subclass. To prevent getting slow paths on each invoke, we force set the
      // resolved method for the super class dex method index if we are in the same dex file.
      // b/19175856
      if (called->GetDexFile() == called_method.dex_file &&
          called_method.dex_method_index != called_dex_method_idx) {
        called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
      }
    }

    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(soa.Self(), called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
        // If the caller is deoptimized (by a breakpoint, for example), we have to
        // continue its execution with interpreter when returning from the called
        // method. Because we do not want to execute the called method with the
        // interpreter, we wrap its execution into the instrumentation stubs.
        // When the called method returns, it will execute the instrumentation
        // exit hook that will determine the need of the interpreter with a call
        // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
        // it is needed.
        code = GetQuickInstrumentationEntryPoint();
      } else {
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else if (called_class->IsInitializing()) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fixup any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;

  return code;
}

/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
 *    and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
 *                       padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
 *                                            as this might be important for null initialization.
 *                                            Must return the jobject, that is, the reference to the
 *                                            entry in the HandleScope (nullptr if necessary).
 *
 */
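// Illustrative sketch (not part of the original file): a minimal delegate satisfying the
// interface described above. It only counts what would be pushed; ComputeNativeCallFrameSize
// below is the real in-tree example of this pattern.
class CountingNativeCallFrameDelegate {
 public:
  void PushGpr(uintptr_t /* val */) { gpr_pushes_++; }
  void PushFpr4(float /* val */) { fpr_pushes_++; }
  void PushFpr8(uint64_t /* val */) { fpr_pushes_++; }
  void PushStack(uintptr_t /* val */) { stack_pushes_++; }
  uintptr_t PushHandle(mirror::Object* /* ref */) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    handle_pushes_++;
    return reinterpret_cast<uintptr_t>(nullptr);  // A real delegate returns the handle here.
  }

  size_t gpr_pushes_ = 0;
  size_t fpr_pushes_ = 0;
  size_t stack_pushes_ = 0;
  size_t handle_pushes_ = 0;
};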
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = true;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__mips__) && defined(__LP64__)
  // Let the code prepare GPRs only and we will load the FPRs with same data.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 8;
  static constexpr size_t kNumNativeFprArgs = 0;

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() const {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushGpr(val);
      }
    } else {
      stack_entries_++;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushStack(val);
      }
      gpr_index_ = 0;
    }
  }
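  // Worked example for the widening path above (illustrative): on an architecture with
  // kMultiGPRegistersWidened (Mips64 in this file), AdvanceInt(0xFFFFFFFFu), i.e. int -1, is
  // reinterpreted as int32_t via bit_cast, sign-extended by the static_cast<int64_t>, and pushed
  // as 0xFFFFFFFFFFFFFFFF -- the "copy into LSB" extension described in the class comment.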
  bool HaveLongGpr() const {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs alignment
        (gpr_index_ & 1) == 1;             // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;         // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() const {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<uint32_t, float>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiFPRegistersWidened) {
            PushFpr8(bit_cast<uint64_t, double>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
        } else {
          PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
        }
        fpr_index_ = 0;
      }
    }
  }
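  // Worked example for the padding logic above (illustrative): on 32-bit ARM
  // (kAlignLongOnStack, counters counting down from an even kNumNativeGprArgs = 4), after one
  // int argument gpr_index_ is 3 (odd), so LongGprNeedsPadding() holds and AdvanceLong() burns
  // one register with PushGpr(0) so the long lands in an aligned register pair (e.g. r2/r3).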
  bool HaveDoubleFpr() const {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }

  bool DoubleFprNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
        kAlignDoubleOnStack &&               // and when it needs alignment
        (fpr_index_ & 1) == 1;               // counter is odd, see constructor
  }

  bool DoubleStackNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // only pad when using multiple registers
        kAlignDoubleOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;           // counter is odd
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t GetStackEntries() const {
    return stack_entries_;
  }

  uint32_t GetNumberOfUsedGprs() const {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t GetNumberOfUsedFprs() const {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
                            // extended.
  T* const delegate_;       // What Push implementation gets called.
};
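// Hypothetical usage sketch (not in the original file), pairing the state machine with the
// counting delegate sketched earlier:
//
//   CountingNativeCallFrameDelegate counter;
//   BuildNativeCallFrameStateMachine<CountingNativeCallFrameDelegate> sm(&counter);
//   sm.AdvanceInt(42);                                  // Consumes one GPR (or a stack slot).
//   sm.AdvanceDouble(bit_cast<uint64_t, double>(1.0));  // Consumes FPR(s) on hard-float ABIs.
//   uint32_t spilled = sm.GetStackEntries();            // 32-bit stack words used so far.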
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
      const {
    // Assumption (a pointer-sized slot per FPR argument) is OK right now, as we have
    // soft-float arm.
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) const {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put fprs and gprs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    UNUSED(sm);
  }

  void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          // TODO: fix abuse of mirror types.
          sm.AdvanceHandleScope(
              reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
          UNREACHABLE();
      }
    }

    num_stack_entries_ = sm.GetStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr4(float /* val */) {
    // not optimizing registers, yet
  }

  void PushFpr8(uint64_t /* val */) {
    // not optimizing registers, yet
  }

  void PushStack(uintptr_t /* val */) {
    // counting is already done in the superclass
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};

class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
 public:
  ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}

  // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
  // is at *m = sp. Will update to point to the bottom of the save frame.
  //
  // Note: assumes the walk (Walk(), invoked via ComputeLayout()) has been run before.
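  // ("Incorrect" here means the stub-built frame leaves no room for the HandleScope; the method
  // below rebuilds the frame with the scope and a relocated method slot squeezed in.)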
  void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ArtMethod* method = **m;

    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.

    // Under the callee saves put handle scope and new method stack reference.
    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
    size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);

    sp8 -= scope_and_method;
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));

    uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
                                        num_handle_scope_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    uint8_t* method_pointer = sp8;
    auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
    *new_method_ref = method;
    *m = new_method_ref;
  }

  // Adds space for the cookie. Note: may leave stack unaligned.
  void LayoutCookie(uint8_t** sp) const {
    // Reference cookie and padding.
    *sp -= 8;
  }

  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
  // Returns the new bottom. Note: this may be unaligned.
  uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.
    LayoutCalleeSaveFrame(self, m, sp, handle_scope);

    // The bottom of the callee-save frame is now where the method is, *m.
    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);

    // Add space for cookie.
    LayoutCookie(&sp8);

    return sp8;
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
                         HandleScope** handle_scope, uintptr_t** start_stack,
                         uintptr_t** start_gpr, uint32_t** start_fpr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Walk(shorty, shorty_len);

    // JNI part.
    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);

    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;

  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
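  // (Per the JNI calling convention, a static method receives its declaring class as a jclass
  // and an instance method receives "this" as a jobject; either way one handle-scope slot is
  // consumed, hence the AdvanceHandleScope in WalkHeader below.)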
  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  uint32_t num_handle_scope_references_;
};

uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
  num_handle_scope_references_++;
  return reinterpret_cast<uintptr_t>(nullptr);
}

void ComputeGenericJniFrameSize::WalkHeader(
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
  // JNIEnv
  sm->AdvancePointer(nullptr);

  // Class object or this as first argument
  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
}

// Class to push values to three separate regions. Used to fill the native call part. Adheres to
// the template requirements of BuildNativeCallFrameStateMachine.
class FillNativeCall {
 public:
  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}

  virtual ~FillNativeCall() {}

  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
    cur_gpr_reg_ = gpr_regs;
    cur_fpr_reg_ = fpr_regs;
    cur_stack_arg_ = stack_args;
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    // Store the raw bits; a plain assignment would perform a numeric float-to-integer
    // conversion instead of preserving the register image.
    *cur_fpr_reg_ = bit_cast<uint32_t, float>(val);
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
    UNREACHABLE();
  }

 private:
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
};

// Visits arguments on the stack, placing them into a region lower down the stack for the benefit
// of transitioning into native code.
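// The layout of that region is computed by ComputeGenericJniFrameSize, and the values are
// written through FillJniCall, which also records every object argument in the new HandleScope.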
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty,
                              uint32_t shorty_len, ArtMethod*** sp)
     : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
       jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
    ComputeGenericJniFrameSize fsc;
    uintptr_t* start_gpr_reg;
    uint32_t* start_fpr_reg;
    uintptr_t* start_stack_arg;
    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
                                             &handle_scope_,
                                             &start_stack_arg,
                                             &start_gpr_reg, &start_fpr_reg);

    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);

    // The JNI environment is always the first argument.
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
    }
  }

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;

  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).GetReference();
  }

  jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).ToJObject();
  }

  void* GetBottomOfUsedArea() const {
    return bottom_of_used_area_;
  }

 private:
  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
  class FillJniCall FINAL : public FillNativeCall {
   public:
    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
                                             handle_scope_(handle_scope), cur_entry_(0) {}

    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
               HandleScope* scope) {
      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
      handle_scope_ = scope;
      cur_entry_ = 0U;
    }

    void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // Initialize padding entries.
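      // Slots that did not receive an argument must still be nulled so that a GC walking this
      // handle scope never sees an uninitialized reference.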
      size_t expected_slots = handle_scope_->NumberOfReferences();
      while (cur_entry_ < expected_slots) {
        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
      }
      DCHECK_NE(cur_entry_, 0U);
    }

   private:
    HandleScope* handle_scope_;
    size_t cur_entry_;
  };

  HandleScope* handle_scope_;
  FillJniCall jni_call_;
  void* bottom_of_used_area_;

  BuildNativeCallFrameStateMachine<FillJniCall> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};

uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
  uintptr_t tmp;
  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
  h.Assign(ref);
  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
  cur_entry_++;
  return tmp;
}

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read the raw bits as a uint64_t so that we don't cast to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
  // Clear out the rest of the scope.
  jni_call_.ResetRemainingScopeSlots();
  // Install the HandleScope.
  self->PushHandleScope(handle_scope_);
}

#if defined(__arm__) || defined(__aarch64__)
extern "C" void* artFindNativeMethod();
#else
extern "C" void* artFindNativeMethod(Thread* self);
#endif

uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
  } else {
    JniMethodEnd(cookie, self);
  }
}

/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
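 * (The "mini stack" is the GPR/FPR staging area laid out by ComputeLayout(); the assembly stub
 * loads those values into the argument registers before jumping to the native code.)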
 * The final element on the stack is a pointer to the native code.
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this, as the handle scope needs to go into the callee-save frame.
 *
 * The return of this function (a TwoWordReturn) holds the bottom of the used alloca area in
 * its hi word and the native code pointer in its lo word; on error it is the failure value
 * produced by GetTwoWordFailureValue().
 */
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ArtMethod* called = *sp;
  DCHECK(called->IsNative()) << PrettyMethod(called, true);
  uint32_t shorty_len = 0;
  const char* shorty = called->GetShorty(&shorty_len);

  // Run the visitor and update sp.
  BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
  visitor.VisitArguments();
  visitor.FinalizeHandleScope(self);

  // Fix up managed-stack things in Thread.
  self->SetTopOfStack(sp);

  self->VerifyStack();

  // Start JNI, save the cookie.
  uint32_t cookie;
  if (called->IsSynchronized()) {
    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
    if (self->IsExceptionPending()) {
      self->PopHandleScope();
      // Report the error to the caller through the failure value.
      return GetTwoWordFailureValue();
    }
  } else {
    cookie = JniMethodStart(self);
  }
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  *(sp32 - 1) = cookie;

  // Retrieve the stored native code.
  void* nativeCode = called->GetEntryPointFromJni();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native
  // function pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
#if defined(__arm__) || defined(__aarch64__)
    nativeCode = artFindNativeMethod();
#else
    nativeCode = artFindNativeMethod(self);
#endif

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.

      // End JNI, as the assembly will move to deliver the exception.
      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
      if (shorty[0] == 'L') {
        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
      } else {
        artQuickGenericJniEndJNINonRef(self, cookie, lock);
      }

      return GetTwoWordFailureValue();
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

  // Return the native code addr (lo) and the bottom of the alloca address (hi).
  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
                                reinterpret_cast<uintptr_t>(nativeCode));
}

/*
 * Called after the native JNI code returns. Responsible for cleanup (handle scope, saved state)
 * and unlocking.
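 * For 'L' return shorties it also decodes the returned jobject back into a mirror::Object
 * pointer (see artQuickGenericJniEndJNIRef and JniMethodEndWithReference).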
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  ArtMethod* called = *sp;
  uint32_t cookie = *(sp32 - 1);

  jobject lock = nullptr;
  if (called->IsSynchronized()) {
    HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
        + sizeof(*sp));
    lock = table->GetHandle(0).ToJObject();
  }

  char return_shorty_char = called->GetShorty()[0];

  if (return_shorty_char == 'L') {
    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
  } else {
    artQuickGenericJniEndJNINonRef(self, cookie, lock);

    switch (return_shorty_char) {
      case 'F': {
        if (kRuntimeISA == kX86) {
          // Convert the result back to float.
          double d = bit_cast<double, uint64_t>(result_f);
          return bit_cast<uint32_t, float>(static_cast<float>(d));
        } else {
          return result_f;
        }
      }
      case 'D':
        return result_f;
      case 'Z':
        return result.z;
      case 'B':
        return result.b;
      case 'C':
        return result.c;
      case 'S':
        return result.s;
      case 'I':
        return result.i;
      case 'J':
        return result.j;
      case 'V':
        return 0;
      default:
        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
        return 0;
    }
  }
}

// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
// for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we are assumed
// to hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).

template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                     Thread* self, ArtMethod** sp) {
  ScopedQuickEntrypointChecks sqec(self);
  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
  ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
  if (UNLIKELY(method == nullptr)) {
    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
                                                      self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  DCHECK(!self->IsExceptionPending());
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
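  // (Methods without compiled code resolve to a bridge entry point, such as the
  // quick-to-interpreter bridge, rather than to null.)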
  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
                          << " location: "
                          << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

// Explicit artInvokeCommon template function declarations to please the analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                          \
  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                    \
  TwoWordReturn artInvokeCommon<type, access_check>(                                      \
      uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}

// Determine the target of an interface dispatch. The receiver (this_object) is known to be
// non-null.
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx,
                                                      mirror::Object* this_object,
                                                      Thread* self, ArtMethod** sp)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  // The optimizing compiler currently does not inline methods that have an interface
  // invocation. We use the outer method directly to avoid fetching a stack map, which is
  // more expensive.
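  // (GetCallingMethod() decodes the stack map to find a possibly inlined caller, while
  // GetOuterMethod() just reads the caller's frame; the DCHECK below verifies they agree here.)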
  ArtMethod* caller_method = QuickArgumentVisitor::GetOuterMethod(sp);
  DCHECK_EQ(caller_method, QuickArgumentVisitor::GetCallingMethod(sp));
  ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
      dex_method_idx, sizeof(void*));
  DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
  ArtMethod* method;
  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
    method = this_object->GetClass()->FindVirtualMethodForInterface(
        interface_method, sizeof(void*));
    if (UNLIKELY(method == nullptr)) {
      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
          interface_method, this_object, caller_method);
      return GetTwoWordFailureValue();  // Failure.
    }
  } else {
    DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
    if (kIsDebugBuild) {
      uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
      const DexFile::CodeItem* code = caller_method->GetCodeItem();
      CHECK_LT(dex_pc, code->insns_size_in_code_units_);
      const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
      Instruction::Code instr_code = instr->Opcode();
      CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
            instr_code == Instruction::INVOKE_INTERFACE_RANGE)
          << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
      if (instr_code == Instruction::INVOKE_INTERFACE) {
        CHECK_EQ(dex_method_idx, instr->VRegB_35c());
      } else {
        CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
        CHECK_EQ(dex_method_idx, instr->VRegB_3rc());
      }
    }

    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
        ->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
                                                   &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
                                                     self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
                          << " location: " << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

}  // namespace art