quick_trampoline_entrypoints.cc revision 960d4f7c5f6a464aa00b8f393cc88996c55464f3
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "art_method-inl.h" 18#include "base/callee_save_type.h" 19#include "base/enums.h" 20#include "callee_save_frame.h" 21#include "common_throws.h" 22#include "debugger.h" 23#include "dex_file-inl.h" 24#include "dex_file_types.h" 25#include "dex_instruction-inl.h" 26#include "entrypoints/entrypoint_utils-inl.h" 27#include "entrypoints/runtime_asm_entrypoints.h" 28#include "gc/accounting/card_table-inl.h" 29#include "imt_conflict_table.h" 30#include "imtable-inl.h" 31#include "instrumentation.h" 32#include "interpreter/interpreter.h" 33#include "linear_alloc.h" 34#include "method_bss_mapping.h" 35#include "method_handles.h" 36#include "method_reference.h" 37#include "mirror/class-inl.h" 38#include "mirror/dex_cache-inl.h" 39#include "mirror/method.h" 40#include "mirror/method_handle_impl.h" 41#include "mirror/object-inl.h" 42#include "mirror/object_array-inl.h" 43#include "oat_file.h" 44#include "oat_quick_method_header.h" 45#include "quick_exception_handler.h" 46#include "runtime.h" 47#include "scoped_thread_state_change-inl.h" 48#include "stack.h" 49#include "thread-inl.h" 50#include "well_known_classes.h" 51 52namespace art { 53 54// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame. 55class QuickArgumentVisitor { 56 // Number of bytes for each out register in the caller method's frame. 57 static constexpr size_t kBytesStackArgLocation = 4; 58 // Frame size in bytes of a callee-save frame for RefsAndArgs. 59 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 60 GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs); 61#if defined(__arm__) 62 // The callee save frame is pointed to by SP. 63 // | argN | | 64 // | ... | | 65 // | arg4 | | 66 // | arg3 spill | | Caller's frame 67 // | arg2 spill | | 68 // | arg1 spill | | 69 // | Method* | --- 70 // | LR | 71 // | ... | 4x6 bytes callee saves 72 // | R3 | 73 // | R2 | 74 // | R1 | 75 // | S15 | 76 // | : | 77 // | S0 | 78 // | | 4x2 bytes padding 79 // | Method* | <- sp 80 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 81 static constexpr bool kAlignPairRegister = true; 82 static constexpr bool kQuickSoftFloatAbi = false; 83 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true; 84 static constexpr bool kQuickSkipOddFpRegisters = false; 85 static constexpr size_t kNumQuickGprArgs = 3; 86 static constexpr size_t kNumQuickFprArgs = 16; 87 static constexpr bool kGprFprLockstep = false; 88 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 89 arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first FPR arg. 90 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 91 arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first GPR arg. 
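  // For example, with this layout the 'this' reference of an instance call can be read straight
  // out of the frame at sp + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset, since 'this' travels in
  // the first argument GPR (R1; R0 carries the Method*). GetProxyThisObject() below performs
  // exactly this computation.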
92 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 93 arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); // Offset of return address. 94 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 95 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 96 } 97#elif defined(__aarch64__) 98 // The callee save frame is pointed to by SP. 99 // | argN | | 100 // | ... | | 101 // | arg4 | | 102 // | arg3 spill | | Caller's frame 103 // | arg2 spill | | 104 // | arg1 spill | | 105 // | Method* | --- 106 // | LR | 107 // | X29 | 108 // | : | 109 // | X20 | 110 // | X7 | 111 // | : | 112 // | X1 | 113 // | D7 | 114 // | : | 115 // | D0 | 116 // | | padding 117 // | Method* | <- sp 118 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 119 static constexpr bool kAlignPairRegister = false; 120 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 121 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 122 static constexpr bool kQuickSkipOddFpRegisters = false; 123 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 124 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 125 static constexpr bool kGprFprLockstep = false; 126 // Offset of first FPR arg. 127 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 128 arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 129 // Offset of first GPR arg. 130 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 131 arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 132 // Offset of return address. 133 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 134 arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); 135 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 136 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 137 } 138#elif defined(__mips__) && !defined(__LP64__) 139 // The callee save frame is pointed to by SP. 140 // | argN | | 141 // | ... | | 142 // | arg4 | | 143 // | arg3 spill | | Caller's frame 144 // | arg2 spill | | 145 // | arg1 spill | | 146 // | Method* | --- 147 // | RA | 148 // | ... | callee saves 149 // | T1 | arg5 150 // | T0 | arg4 151 // | A3 | arg3 152 // | A2 | arg2 153 // | A1 | arg1 154 // | F19 | 155 // | F18 | f_arg5 156 // | F17 | 157 // | F16 | f_arg4 158 // | F15 | 159 // | F14 | f_arg3 160 // | F13 | 161 // | F12 | f_arg2 162 // | F11 | 163 // | F10 | f_arg1 164 // | F9 | 165 // | F8 | f_arg0 166 // | | padding 167 // | A0/Method* | <- sp 168 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 169 static constexpr bool kAlignPairRegister = true; 170 static constexpr bool kQuickSoftFloatAbi = false; 171 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 172 static constexpr bool kQuickSkipOddFpRegisters = true; 173 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 174 static constexpr size_t kNumQuickFprArgs = 12; // 6 arguments passed in FPRs. Floats can be 175 // passed only in even numbered registers and each 176 // double occupies two registers. 177 static constexpr bool kGprFprLockstep = false; 178 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8; // Offset of first FPR arg. 179 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56; // Offset of first GPR arg. 180 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108; // Offset of return address. 
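  // Unlike the ARM and ARM64 cases above, these offsets are spelled out as byte literals rather
  // than derived from helper functions; they still measure from SP of the RefsAndArgs frame
  // pictured above and have to stay in sync with that layout.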
181 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 182 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 183 } 184#elif defined(__mips__) && defined(__LP64__) 185 // The callee save frame is pointed to by SP. 186 // | argN | | 187 // | ... | | 188 // | arg4 | | 189 // | arg3 spill | | Caller's frame 190 // | arg2 spill | | 191 // | arg1 spill | | 192 // | Method* | --- 193 // | RA | 194 // | ... | callee saves 195 // | A7 | arg7 196 // | A6 | arg6 197 // | A5 | arg5 198 // | A4 | arg4 199 // | A3 | arg3 200 // | A2 | arg2 201 // | A1 | arg1 202 // | F19 | f_arg7 203 // | F18 | f_arg6 204 // | F17 | f_arg5 205 // | F16 | f_arg4 206 // | F15 | f_arg3 207 // | F14 | f_arg2 208 // | F13 | f_arg1 209 // | F12 | f_arg0 210 // | | padding 211 // | A0/Method* | <- sp 212 // NOTE: for Mip64, when A0 is skipped, F12 is also skipped. 213 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 214 static constexpr bool kAlignPairRegister = false; 215 static constexpr bool kQuickSoftFloatAbi = false; 216 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 217 static constexpr bool kQuickSkipOddFpRegisters = false; 218 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 219 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs. 220 static constexpr bool kGprFprLockstep = true; 221 222 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F13). 223 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1). 224 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address. 225 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 226 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 227 } 228#elif defined(__i386__) 229 // The callee save frame is pointed to by SP. 230 // | argN | | 231 // | ... | | 232 // | arg4 | | 233 // | arg3 spill | | Caller's frame 234 // | arg2 spill | | 235 // | arg1 spill | | 236 // | Method* | --- 237 // | Return | 238 // | EBP,ESI,EDI | callee saves 239 // | EBX | arg3 240 // | EDX | arg2 241 // | ECX | arg1 242 // | XMM3 | float arg 4 243 // | XMM2 | float arg 3 244 // | XMM1 | float arg 2 245 // | XMM0 | float arg 1 246 // | EAX/Method* | <- sp 247 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 248 static constexpr bool kAlignPairRegister = false; 249 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 250 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 251 static constexpr bool kQuickSkipOddFpRegisters = false; 252 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 253 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs. 254 static constexpr bool kGprFprLockstep = false; 255 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg. 256 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg. 257 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address. 258 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 259 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 260 } 261#elif defined(__x86_64__) 262 // The callee save frame is pointed to by SP. 263 // | argN | | 264 // | ... | | 265 // | reg. 
arg spills | | Caller's frame 266 // | Method* | --- 267 // | Return | 268 // | R15 | callee save 269 // | R14 | callee save 270 // | R13 | callee save 271 // | R12 | callee save 272 // | R9 | arg5 273 // | R8 | arg4 274 // | RSI/R6 | arg1 275 // | RBP/R5 | callee save 276 // | RBX/R3 | callee save 277 // | RDX/R2 | arg2 278 // | RCX/R1 | arg3 279 // | XMM7 | float arg 8 280 // | XMM6 | float arg 7 281 // | XMM5 | float arg 6 282 // | XMM4 | float arg 5 283 // | XMM3 | float arg 4 284 // | XMM2 | float arg 3 285 // | XMM1 | float arg 2 286 // | XMM0 | float arg 1 287 // | Padding | 288 // | RDI/Method* | <- sp 289 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 290 static constexpr bool kAlignPairRegister = false; 291 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 292 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 293 static constexpr bool kQuickSkipOddFpRegisters = false; 294 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 295 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 296 static constexpr bool kGprFprLockstep = false; 297 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg. 298 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg. 299 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address. 300 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 301 switch (gpr_index) { 302 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA)); 303 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA)); 304 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA)); 305 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA)); 306 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA)); 307 default: 308 LOG(FATAL) << "Unexpected GPR index: " << gpr_index; 309 return 0; 310 } 311 } 312#else 313#error "Unsupported architecture" 314#endif 315 316 public: 317 // Special handling for proxy methods. Proxy methods are instance methods so the 318 // 'this' object is the 1st argument. They also have the same frame layout as the 319 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the 320 // 1st GPR. 321 static mirror::Object* GetProxyThisObject(ArtMethod** sp) 322 REQUIRES_SHARED(Locks::mutator_lock_) { 323 CHECK((*sp)->IsProxyMethod()); 324 CHECK_GT(kNumQuickGprArgs, 0u); 325 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 
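    // The computation below mirrors GetParamAddress() for a GPR argument: start at the first GPR
    // spill slot of the RefsAndArgs frame (Gpr1Offset) and add the byte offset of the chosen
    // register.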
326 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + 327 GprIndexToGprOffset(kThisGprIndex); 328 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset; 329 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); 330 } 331 332 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 333 DCHECK((*sp)->IsCalleeSaveMethod()); 334 return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs); 335 } 336 337 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 338 DCHECK((*sp)->IsCalleeSaveMethod()); 339 uint8_t* previous_sp = 340 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; 341 return *reinterpret_cast<ArtMethod**>(previous_sp); 342 } 343 344 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 345 DCHECK((*sp)->IsCalleeSaveMethod()); 346 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, 347 CalleeSaveType::kSaveRefsAndArgs); 348 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 349 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 350 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 351 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 352 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 353 354 if (current_code->IsOptimized()) { 355 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 356 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 357 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding); 358 DCHECK(stack_map.IsValid()); 359 if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) { 360 InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); 361 return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 362 inline_info.GetDepth(encoding.inline_info.encoding)-1); 363 } else { 364 return stack_map.GetDexPc(encoding.stack_map.encoding); 365 } 366 } else { 367 return current_code->ToDexPc(*caller_sp, outer_pc); 368 } 369 } 370 371 static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index) 372 REQUIRES_SHARED(Locks::mutator_lock_) { 373 DCHECK((*sp)->IsCalleeSaveMethod()); 374 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, 375 CalleeSaveType::kSaveRefsAndArgs); 376 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 377 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 378 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 379 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 380 if (!current_code->IsOptimized()) { 381 return false; 382 } 383 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 384 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 385 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 386 MethodInfo method_info = current_code->GetOptimizedMethodInfo(); 387 InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding)); 388 if (invoke.IsValid()) { 389 *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding)); 390 *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info); 391 return true; 392 } 393 return false; 394 } 395 396 // For the given quick ref and args quick frame, return the caller's PC. 
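  // Concretely, this reads the return address spilled at kQuickCalleeSaveFrame_RefAndArgs_LrOffset,
  // i.e. the address in the caller's code that the callee will return to.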
  static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
          is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
          gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
          fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
          stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                      + sizeof(ArtMethod*)),  // Skip ArtMethod*.
          gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
          cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, the counter (fpr_double_index_) should be even exactly when the
    // next register is even.
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPRs and FPRs are 64 bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
471 return *reinterpret_cast<uint64_t*>(stack_args_ 472 + stack_index_ * kBytesStackArgLocation); 473 } 474 475 void IncGprIndex() { 476 gpr_index_++; 477 if (kGprFprLockstep) { 478 fpr_index_++; 479 } 480 } 481 482 void IncFprIndex() { 483 fpr_index_++; 484 if (kGprFprLockstep) { 485 gpr_index_++; 486 } 487 } 488 489 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) { 490 // (a) 'stack_args_' should point to the first method's argument 491 // (b) whatever the argument type it is, the 'stack_index_' should 492 // be moved forward along with every visiting. 493 gpr_index_ = 0; 494 fpr_index_ = 0; 495 if (kQuickDoubleRegAlignedFloatBackFilled) { 496 fpr_double_index_ = 0; 497 } 498 stack_index_ = 0; 499 if (!is_static_) { // Handle this. 500 cur_type_ = Primitive::kPrimNot; 501 is_split_long_or_double_ = false; 502 Visit(); 503 stack_index_++; 504 if (kNumQuickGprArgs > 0) { 505 IncGprIndex(); 506 } 507 } 508 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) { 509 cur_type_ = Primitive::GetType(shorty_[shorty_index]); 510 switch (cur_type_) { 511 case Primitive::kPrimNot: 512 case Primitive::kPrimBoolean: 513 case Primitive::kPrimByte: 514 case Primitive::kPrimChar: 515 case Primitive::kPrimShort: 516 case Primitive::kPrimInt: 517 is_split_long_or_double_ = false; 518 Visit(); 519 stack_index_++; 520 if (gpr_index_ < kNumQuickGprArgs) { 521 IncGprIndex(); 522 } 523 break; 524 case Primitive::kPrimFloat: 525 is_split_long_or_double_ = false; 526 Visit(); 527 stack_index_++; 528 if (kQuickSoftFloatAbi) { 529 if (gpr_index_ < kNumQuickGprArgs) { 530 IncGprIndex(); 531 } 532 } else { 533 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 534 IncFprIndex(); 535 if (kQuickDoubleRegAlignedFloatBackFilled) { 536 // Double should not overlap with float. 537 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4. 538 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2)); 539 // Float should not overlap with double. 540 if (fpr_index_ % 2 == 0) { 541 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 542 } 543 } else if (kQuickSkipOddFpRegisters) { 544 IncFprIndex(); 545 } 546 } 547 } 548 break; 549 case Primitive::kPrimDouble: 550 case Primitive::kPrimLong: 551 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) { 552 if (cur_type_ == Primitive::kPrimLong && 553#if defined(__mips__) && !defined(__LP64__) 554 (gpr_index_ == 0 || gpr_index_ == 2) && 555#else 556 gpr_index_ == 0 && 557#endif 558 kAlignPairRegister) { 559 // Currently, this is only for ARM and MIPS, where we align long parameters with 560 // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using 561 // R2 (on ARM) or A2(T0) (on MIPS) instead. 562 IncGprIndex(); 563 } 564 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) && 565 ((gpr_index_ + 1) == kNumQuickGprArgs); 566 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) { 567 // We don't want to split this. Pass over this register. 
568 gpr_index_++; 569 is_split_long_or_double_ = false; 570 } 571 Visit(); 572 if (kBytesStackArgLocation == 4) { 573 stack_index_+= 2; 574 } else { 575 CHECK_EQ(kBytesStackArgLocation, 8U); 576 stack_index_++; 577 } 578 if (gpr_index_ < kNumQuickGprArgs) { 579 IncGprIndex(); 580 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) { 581 if (gpr_index_ < kNumQuickGprArgs) { 582 IncGprIndex(); 583 } 584 } 585 } 586 } else { 587 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) && 588 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled; 589 Visit(); 590 if (kBytesStackArgLocation == 4) { 591 stack_index_+= 2; 592 } else { 593 CHECK_EQ(kBytesStackArgLocation, 8U); 594 stack_index_++; 595 } 596 if (kQuickDoubleRegAlignedFloatBackFilled) { 597 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 598 fpr_double_index_ += 2; 599 // Float should not overlap with double. 600 if (fpr_index_ % 2 == 0) { 601 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 602 } 603 } 604 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 605 IncFprIndex(); 606 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) { 607 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 608 IncFprIndex(); 609 } 610 } 611 } 612 } 613 break; 614 default: 615 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_; 616 } 617 } 618 } 619 620 protected: 621 const bool is_static_; 622 const char* const shorty_; 623 const uint32_t shorty_len_; 624 625 private: 626 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame. 627 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame. 628 uint8_t* const stack_args_; // Address of stack arguments in caller's frame. 629 uint32_t gpr_index_; // Index into spilled GPRs. 630 // Index into spilled FPRs. 631 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_ 632 // holds a higher register number. 633 uint32_t fpr_index_; 634 // Index into spilled FPRs for aligned double. 635 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in 636 // terms of singles, may be behind fpr_index. 637 uint32_t fpr_double_index_; 638 uint32_t stack_index_; // Index into arguments on the stack. 639 // The current type of argument during VisitArguments. 640 Primitive::Type cur_type_; 641 // Does a 64bit parameter straddle the register and stack arguments? 642 bool is_split_long_or_double_; 643}; 644 645// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It 646// allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 647extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) 648 REQUIRES_SHARED(Locks::mutator_lock_) { 649 return QuickArgumentVisitor::GetProxyThisObject(sp); 650} 651 652// Visits arguments on the stack placing them into the shadow frame. 
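// A rough usage sketch (this mirrors artQuickToInterpreterBridge() below): the bridge allocates a
// ShadowFrame sized for the method's register count and then runs
//   BuildQuickShadowFrameVisitor visitor(sp, method->IsStatic(), shorty, shorty_len,
//                                        shadow_frame, first_arg_reg);
//   visitor.VisitArguments();
// so the interpreter finds the incoming arguments in the upper vregs, starting at first_arg_reg
// (registers_size_ - ins_size_). Longs and doubles consume two consecutive vregs.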
653class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { 654 public: 655 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, 656 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : 657 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} 658 659 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 660 661 private: 662 ShadowFrame* const sf_; 663 uint32_t cur_reg_; 664 665 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor); 666}; 667 668void BuildQuickShadowFrameVisitor::Visit() { 669 Primitive::Type type = GetParamPrimitiveType(); 670 switch (type) { 671 case Primitive::kPrimLong: // Fall-through. 672 case Primitive::kPrimDouble: 673 if (IsSplitLongOrDouble()) { 674 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam()); 675 } else { 676 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress())); 677 } 678 ++cur_reg_; 679 break; 680 case Primitive::kPrimNot: { 681 StackReference<mirror::Object>* stack_ref = 682 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 683 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr()); 684 } 685 break; 686 case Primitive::kPrimBoolean: // Fall-through. 687 case Primitive::kPrimByte: // Fall-through. 688 case Primitive::kPrimChar: // Fall-through. 689 case Primitive::kPrimShort: // Fall-through. 690 case Primitive::kPrimInt: // Fall-through. 691 case Primitive::kPrimFloat: 692 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress())); 693 break; 694 case Primitive::kPrimVoid: 695 LOG(FATAL) << "UNREACHABLE"; 696 UNREACHABLE(); 697 } 698 ++cur_reg_; 699} 700 701// Don't inline. See b/65159206. 702NO_INLINE 703static void HandleDeoptimization(JValue* result, 704 ArtMethod* method, 705 ShadowFrame* deopt_frame, 706 ManagedStack* fragment) 707 REQUIRES_SHARED(Locks::mutator_lock_) { 708 // Coming from partial-fragment deopt. 709 Thread* self = Thread::Current(); 710 if (kIsDebugBuild) { 711 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom 712 // of the call-stack) corresponds to the called method. 713 ShadowFrame* linked = deopt_frame; 714 while (linked->GetLink() != nullptr) { 715 linked = linked->GetLink(); 716 } 717 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " " 718 << ArtMethod::PrettyMethod(linked->GetMethod()); 719 } 720 721 if (VLOG_IS_ON(deopt)) { 722 // Print out the stack to verify that it was a partial-fragment deopt. 723 LOG(INFO) << "Continue-ing from deopt. Stack is:"; 724 QuickExceptionHandler::DumpFramesWithType(self, true); 725 } 726 727 ObjPtr<mirror::Throwable> pending_exception; 728 bool from_code = false; 729 DeoptimizationMethodType method_type; 730 self->PopDeoptimizationContext(/* out */ result, 731 /* out */ &pending_exception, 732 /* out */ &from_code, 733 /* out */ &method_type); 734 735 // Push a transition back into managed code onto the linked list in thread. 736 self->PushManagedStackFragment(fragment); 737 738 // Ensure that the stack is still in order. 739 if (kIsDebugBuild) { 740 class DummyStackVisitor : public StackVisitor { 741 public: 742 explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_) 743 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} 744 745 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 746 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking 747 // logic. 
Just always say we want to continue. 748 return true; 749 } 750 }; 751 DummyStackVisitor dsv(self); 752 dsv.WalkStack(); 753 } 754 755 // Restore the exception that was pending before deoptimization then interpret the 756 // deoptimized frames. 757 if (pending_exception != nullptr) { 758 self->SetException(pending_exception); 759 } 760 interpreter::EnterInterpreterFromDeoptimize(self, 761 deopt_frame, 762 result, 763 from_code, 764 DeoptimizationMethodType::kDefault); 765} 766 767extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) 768 REQUIRES_SHARED(Locks::mutator_lock_) { 769 // Ensure we don't get thread suspension until the object arguments are safely in the shadow 770 // frame. 771 ScopedQuickEntrypointChecks sqec(self); 772 773 if (UNLIKELY(!method->IsInvokable())) { 774 method->ThrowInvocationTimeError(); 775 return 0; 776 } 777 778 JValue tmp_value; 779 ShadowFrame* deopt_frame = self->PopStackedShadowFrame( 780 StackedShadowFrameType::kDeoptimizationShadowFrame, false); 781 ManagedStack fragment; 782 783 DCHECK(!method->IsNative()) << method->PrettyMethod(); 784 uint32_t shorty_len = 0; 785 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 786 const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem(); 787 DCHECK(code_item != nullptr) << method->PrettyMethod(); 788 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 789 790 JValue result; 791 792 if (UNLIKELY(deopt_frame != nullptr)) { 793 HandleDeoptimization(&result, method, deopt_frame, &fragment); 794 } else { 795 const char* old_cause = self->StartAssertNoThreadSuspension( 796 "Building interpreter shadow frame"); 797 uint16_t num_regs = code_item->registers_size_; 798 // No last shadow coming from quick. 799 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 800 CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0); 801 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 802 size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; 803 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, 804 shadow_frame, first_arg_reg); 805 shadow_frame_builder.VisitArguments(); 806 const bool needs_initialization = 807 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); 808 // Push a transition back into managed code onto the linked list in thread. 809 self->PushManagedStackFragment(&fragment); 810 self->PushShadowFrame(shadow_frame); 811 self->EndAssertNoThreadSuspension(old_cause); 812 813 if (needs_initialization) { 814 // Ensure static method's class is initialized. 815 StackHandleScope<1> hs(self); 816 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass())); 817 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { 818 DCHECK(Thread::Current()->IsExceptionPending()) 819 << shadow_frame->GetMethod()->PrettyMethod(); 820 self->PopManagedStackFragment(fragment); 821 return 0; 822 } 823 } 824 825 result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame); 826 } 827 828 // Pop transition. 
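  // This balances the PushManagedStackFragment() performed on both paths above (the
  // deoptimization path and the regular shadow-frame interpretation path).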
829 self->PopManagedStackFragment(fragment); 830 831 // Request a stack deoptimization if needed 832 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 833 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); 834 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization 835 // should be done and it knows the real return pc. 836 if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) && 837 Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) { 838 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { 839 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 840 << caller->PrettyMethod(); 841 } else { 842 // Push the context of the deoptimization stack so we can restore the return value and the 843 // exception before executing the deoptimized frames. 844 self->PushDeoptimizationContext( 845 result, 846 shorty[0] == 'L' || shorty[0] == '[', /* class or array */ 847 self->GetException(), 848 false /* from_code */, 849 DeoptimizationMethodType::kDefault); 850 851 // Set special exception to cause deoptimization. 852 self->SetException(Thread::GetDeoptimizationException()); 853 } 854 } 855 856 // No need to restore the args since the method has already been run by the interpreter. 857 return result.GetJ(); 858} 859 860// Visits arguments on the stack placing them into the args vector, Object* arguments are converted 861// to jobjects. 862class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { 863 public: 864 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, 865 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : 866 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} 867 868 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 869 870 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 871 872 private: 873 ScopedObjectAccessUnchecked* const soa_; 874 std::vector<jvalue>* const args_; 875 // References which we must update when exiting in case the GC moved the objects. 876 std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_; 877 878 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); 879}; 880 881void BuildQuickArgumentVisitor::Visit() { 882 jvalue val; 883 Primitive::Type type = GetParamPrimitiveType(); 884 switch (type) { 885 case Primitive::kPrimNot: { 886 StackReference<mirror::Object>* stack_ref = 887 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 888 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 889 references_.push_back(std::make_pair(val.l, stack_ref)); 890 break; 891 } 892 case Primitive::kPrimLong: // Fall-through. 893 case Primitive::kPrimDouble: 894 if (IsSplitLongOrDouble()) { 895 val.j = ReadSplitLongParam(); 896 } else { 897 val.j = *reinterpret_cast<jlong*>(GetParamAddress()); 898 } 899 break; 900 case Primitive::kPrimBoolean: // Fall-through. 901 case Primitive::kPrimByte: // Fall-through. 902 case Primitive::kPrimChar: // Fall-through. 903 case Primitive::kPrimShort: // Fall-through. 904 case Primitive::kPrimInt: // Fall-through. 905 case Primitive::kPrimFloat: 906 val.i = *reinterpret_cast<jint*>(GetParamAddress()); 907 break; 908 case Primitive::kPrimVoid: 909 LOG(FATAL) << "UNREACHABLE"; 910 UNREACHABLE(); 911 } 912 args_->push_back(val); 913} 914 915void BuildQuickArgumentVisitor::FixupReferences() { 916 // Fixup any references which may have changed. 
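  // For each (jobject, stack slot) pair recorded in Visit(): re-decode the local reference, which
  // the GC keeps up to date, write the possibly-moved object back into the quick frame's stack
  // slot, and then drop the local reference.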
917 for (const auto& pair : references_) { 918 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 919 soa_->Env()->DeleteLocalRef(pair.first); 920 } 921} 922// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method 923// which is responsible for recording callee save registers. We explicitly place into jobjects the 924// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a 925// field within the proxy object, which will box the primitive arguments and deal with error cases. 926extern "C" uint64_t artQuickProxyInvokeHandler( 927 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) 928 REQUIRES_SHARED(Locks::mutator_lock_) { 929 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod(); 930 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod(); 931 // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 932 const char* old_cause = 933 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); 934 // Register the top of the managed stack, making stack crawlable. 935 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod(); 936 self->VerifyStack(); 937 // Start new JNI local reference state. 938 JNIEnvExt* env = self->GetJniEnv(); 939 ScopedObjectAccessUnchecked soa(env); 940 ScopedJniEnvLocalRefState env_state(env); 941 // Create local ref. copies of proxy method and the receiver. 942 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); 943 944 // Placing arguments into args vector and remove the receiver. 945 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 946 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " " 947 << non_proxy_method->PrettyMethod(); 948 std::vector<jvalue> args; 949 uint32_t shorty_len = 0; 950 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 951 BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); 952 953 local_ref_visitor.VisitArguments(); 954 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod(); 955 args.erase(args.begin()); 956 957 // Convert proxy method into expected interface method. 958 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize); 959 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod(); 960 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod(); 961 self->EndAssertNoThreadSuspension(old_cause); 962 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 963 DCHECK(!Runtime::Current()->IsActiveTransaction()); 964 ObjPtr<mirror::Method> interface_reflect_method = 965 mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), interface_method); 966 if (interface_reflect_method == nullptr) { 967 soa.Self()->AssertPendingOOMException(); 968 return 0; 969 } 970 jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method); 971 972 // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code 973 // that performs allocations. 974 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); 975 // Restore references which might have moved. 
976 local_ref_visitor.FixupReferences(); 977 return result.GetJ(); 978} 979 980// Read object references held in arguments from quick frames and place in a JNI local references, 981// so they don't get garbage collected. 982class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { 983 public: 984 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 985 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : 986 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} 987 988 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 989 990 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 991 992 private: 993 ScopedObjectAccessUnchecked* const soa_; 994 // References which we must update when exiting in case the GC moved the objects. 995 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_; 996 997 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor); 998}; 999 1000void RememberForGcArgumentVisitor::Visit() { 1001 if (IsParamAReference()) { 1002 StackReference<mirror::Object>* stack_ref = 1003 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 1004 jobject reference = 1005 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 1006 references_.push_back(std::make_pair(reference, stack_ref)); 1007 } 1008} 1009 1010void RememberForGcArgumentVisitor::FixupReferences() { 1011 // Fixup any references which may have changed. 1012 for (const auto& pair : references_) { 1013 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 1014 soa_->Env()->DeleteLocalRef(pair.first); 1015 } 1016} 1017 1018extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, 1019 mirror::Object* this_object, 1020 Thread* self, 1021 ArtMethod** sp) 1022 REQUIRES_SHARED(Locks::mutator_lock_) { 1023 const void* result; 1024 // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip 1025 // that part. 1026 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1027 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1028 if (instrumentation->IsDeoptimized(method)) { 1029 result = GetQuickToInterpreterBridge(); 1030 } else { 1031 result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize); 1032 DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result)); 1033 } 1034 1035 bool interpreter_entry = (result == GetQuickToInterpreterBridge()); 1036 bool is_static = method->IsStatic(); 1037 uint32_t shorty_len; 1038 const char* shorty = 1039 method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len); 1040 1041 ScopedObjectAccessUnchecked soa(self); 1042 RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa); 1043 visitor.VisitArguments(); 1044 1045 instrumentation->PushInstrumentationStackFrame(self, 1046 is_static ? 
nullptr : this_object, 1047 method, 1048 QuickArgumentVisitor::GetCallingPc(sp), 1049 interpreter_entry); 1050 1051 visitor.FixupReferences(); 1052 if (UNLIKELY(self->IsExceptionPending())) { 1053 return nullptr; 1054 } 1055 CHECK(result != nullptr) << method->PrettyMethod(); 1056 return result; 1057} 1058 1059extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, 1060 ArtMethod** sp, 1061 uint64_t* gpr_result, 1062 uint64_t* fpr_result) 1063 REQUIRES_SHARED(Locks::mutator_lock_) { 1064 DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current())); 1065 CHECK(gpr_result != nullptr); 1066 CHECK(fpr_result != nullptr); 1067 // Instrumentation exit stub must not be entered with a pending exception. 1068 CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception " 1069 << self->GetException()->Dump(); 1070 // Compute address of return PC and sanity check that it currently holds 0. 1071 size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, 1072 CalleeSaveType::kSaveEverything); 1073 uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + 1074 return_pc_offset); 1075 CHECK_EQ(*return_pc, 0U); 1076 1077 // Pop the frame filling in the return pc. The low half of the return value is 0 when 1078 // deoptimization shouldn't be performed with the high-half having the return address. When 1079 // deoptimization should be performed the low half is zero and the high-half the address of the 1080 // deoptimization entry point. 1081 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1082 TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame( 1083 self, return_pc, gpr_result, fpr_result); 1084 if (self->IsExceptionPending()) { 1085 return GetTwoWordFailureValue(); 1086 } 1087 return return_or_deoptimize_pc; 1088} 1089 1090// Lazily resolve a method for quick. Called by stub code. 1091extern "C" const void* artQuickResolutionTrampoline( 1092 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) 1093 REQUIRES_SHARED(Locks::mutator_lock_) { 1094 // The resolution trampoline stashes the resolved method into the callee-save frame to transport 1095 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely 1096 // does not have the same stack layout as the callee-save method). 
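  // As in artInstrumentationMethodEntryFromCode() above, the trailing 'false' passed below skips
  // the exit-time stack verification.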
1097 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1098 // Start new JNI local reference state 1099 JNIEnvExt* env = self->GetJniEnv(); 1100 ScopedObjectAccessUnchecked soa(env); 1101 ScopedJniEnvLocalRefState env_state(env); 1102 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 1103 1104 // Compute details about the called method (avoid GCs) 1105 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 1106 InvokeType invoke_type; 1107 MethodReference called_method(nullptr, 0); 1108 const bool called_method_known_on_entry = !called->IsRuntimeMethod(); 1109 ArtMethod* caller = nullptr; 1110 if (!called_method_known_on_entry) { 1111 caller = QuickArgumentVisitor::GetCallingMethod(sp); 1112 called_method.dex_file = caller->GetDexFile(); 1113 1114 InvokeType stack_map_invoke_type; 1115 uint32_t stack_map_dex_method_idx; 1116 const bool found_stack_map = QuickArgumentVisitor::GetInvokeType(sp, 1117 &stack_map_invoke_type, 1118 &stack_map_dex_method_idx); 1119 // For debug builds, we make sure both of the paths are consistent by also looking at the dex 1120 // code. 1121 if (!found_stack_map || kIsDebugBuild) { 1122 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 1123 const DexFile::CodeItem* code; 1124 code = caller->GetCodeItem(); 1125 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 1126 const Instruction& instr = code->InstructionAt(dex_pc); 1127 Instruction::Code instr_code = instr.Opcode(); 1128 bool is_range; 1129 switch (instr_code) { 1130 case Instruction::INVOKE_DIRECT: 1131 invoke_type = kDirect; 1132 is_range = false; 1133 break; 1134 case Instruction::INVOKE_DIRECT_RANGE: 1135 invoke_type = kDirect; 1136 is_range = true; 1137 break; 1138 case Instruction::INVOKE_STATIC: 1139 invoke_type = kStatic; 1140 is_range = false; 1141 break; 1142 case Instruction::INVOKE_STATIC_RANGE: 1143 invoke_type = kStatic; 1144 is_range = true; 1145 break; 1146 case Instruction::INVOKE_SUPER: 1147 invoke_type = kSuper; 1148 is_range = false; 1149 break; 1150 case Instruction::INVOKE_SUPER_RANGE: 1151 invoke_type = kSuper; 1152 is_range = true; 1153 break; 1154 case Instruction::INVOKE_VIRTUAL: 1155 invoke_type = kVirtual; 1156 is_range = false; 1157 break; 1158 case Instruction::INVOKE_VIRTUAL_RANGE: 1159 invoke_type = kVirtual; 1160 is_range = true; 1161 break; 1162 case Instruction::INVOKE_INTERFACE: 1163 invoke_type = kInterface; 1164 is_range = false; 1165 break; 1166 case Instruction::INVOKE_INTERFACE_RANGE: 1167 invoke_type = kInterface; 1168 is_range = true; 1169 break; 1170 default: 1171 LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr); 1172 UNREACHABLE(); 1173 } 1174 called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c(); 1175 // Check that the invoke matches what we expected, note that this path only happens for debug 1176 // builds. 1177 if (found_stack_map) { 1178 DCHECK_EQ(stack_map_invoke_type, invoke_type); 1179 if (invoke_type != kSuper) { 1180 // Super may be sharpened. 
1181 DCHECK_EQ(stack_map_dex_method_idx, called_method.index) 1182 << called_method.dex_file->PrettyMethod(stack_map_dex_method_idx) << " " 1183 << called_method.PrettyMethod(); 1184 } 1185 } else { 1186 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " " 1187 << called_method.index; 1188 } 1189 } else { 1190 invoke_type = stack_map_invoke_type; 1191 called_method.index = stack_map_dex_method_idx; 1192 } 1193 } else { 1194 invoke_type = kStatic; 1195 called_method.dex_file = called->GetDexFile(); 1196 called_method.index = called->GetDexMethodIndex(); 1197 } 1198 uint32_t shorty_len; 1199 const char* shorty = 1200 called_method.dex_file->GetMethodShorty(called_method.GetMethodId(), &shorty_len); 1201 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 1202 visitor.VisitArguments(); 1203 self->EndAssertNoThreadSuspension(old_cause); 1204 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 1205 // Resolve method filling in dex cache. 1206 if (!called_method_known_on_entry) { 1207 StackHandleScope<1> hs(self); 1208 mirror::Object* dummy = nullptr; 1209 HandleWrapper<mirror::Object> h_receiver( 1210 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy)); 1211 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1212 called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( 1213 self, called_method.index, caller, invoke_type); 1214 1215 // Update .bss entry in oat file if any. 1216 if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) { 1217 const MethodBssMapping* mapping = 1218 called_method.dex_file->GetOatDexFile()->GetMethodBssMapping(); 1219 if (mapping != nullptr) { 1220 auto pp = std::partition_point( 1221 mapping->begin(), 1222 mapping->end(), 1223 [called_method](const MethodBssMappingEntry& entry) { 1224 return entry.method_index < called_method.index; 1225 }); 1226 if (pp != mapping->end() && pp->CoversIndex(called_method.index)) { 1227 size_t bss_offset = pp->GetBssOffset(called_method.index, 1228 static_cast<size_t>(kRuntimePointerSize)); 1229 DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize)); 1230 const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile(); 1231 ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>( 1232 oat_file->BssBegin() + bss_offset)); 1233 DCHECK_GE(method_entry, oat_file->GetBssMethods().data()); 1234 DCHECK_LT(method_entry, 1235 oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size()); 1236 *method_entry = called; 1237 } 1238 } 1239 } 1240 } 1241 const void* code = nullptr; 1242 if (LIKELY(!self->IsExceptionPending())) { 1243 // Incompatible class change should have been handled in resolve method. 1244 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 1245 << called->PrettyMethod() << " " << invoke_type; 1246 if (virtual_or_interface || invoke_type == kSuper) { 1247 // Refine called method based on receiver for kVirtual/kInterface, and 1248 // caller for kSuper. 
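      // For example: a kVirtual call re-dispatches through the receiver's vtable slot for the
      // resolved method, kInterface resolves the concrete implementation in the receiver's class,
      // and kSuper looks the method up in the caller's superclass vtable (or via the interface
      // for an interface-super call).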
1249 ArtMethod* orig_called = called; 1250 if (invoke_type == kVirtual) { 1251 CHECK(receiver != nullptr) << invoke_type; 1252 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize); 1253 } else if (invoke_type == kInterface) { 1254 CHECK(receiver != nullptr) << invoke_type; 1255 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize); 1256 } else { 1257 DCHECK_EQ(invoke_type, kSuper); 1258 CHECK(caller != nullptr) << invoke_type; 1259 StackHandleScope<2> hs(self); 1260 Handle<mirror::DexCache> dex_cache( 1261 hs.NewHandle(caller->GetDeclaringClass()->GetDexCache())); 1262 Handle<mirror::ClassLoader> class_loader( 1263 hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader())); 1264 // TODO Maybe put this into a mirror::Class function. 1265 ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType( 1266 *dex_cache->GetDexFile(), 1267 dex_cache->GetDexFile()->GetMethodId(called_method.index).class_idx_, 1268 dex_cache.Get(), 1269 class_loader.Get()); 1270 if (ref_class->IsInterface()) { 1271 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize); 1272 } else { 1273 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( 1274 called->GetMethodIndex(), kRuntimePointerSize); 1275 } 1276 } 1277 1278 CHECK(called != nullptr) << orig_called->PrettyMethod() << " " 1279 << mirror::Object::PrettyTypeOf(receiver) << " " 1280 << invoke_type << " " << orig_called->GetVtableIndex(); 1281 } 1282 1283 // Ensure that the called method's class is initialized. 1284 StackHandleScope<1> hs(soa.Self()); 1285 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 1286 linker->EnsureInitialized(soa.Self(), called_class, true, true); 1287 if (LIKELY(called_class->IsInitialized())) { 1288 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1289 // If we are single-stepping or the called method is deoptimized (by a 1290 // breakpoint, for example), then we have to execute the called method 1291 // with the interpreter. 1292 code = GetQuickToInterpreterBridge(); 1293 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) { 1294 // If the caller is deoptimized (by a breakpoint, for example), we have to 1295 // continue its execution with interpreter when returning from the called 1296 // method. Because we do not want to execute the called method with the 1297 // interpreter, we wrap its execution into the instrumentation stubs. 1298 // When the called method returns, it will execute the instrumentation 1299 // exit hook that will determine the need of the interpreter with a call 1300 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if 1301 // it is needed. 1302 code = GetQuickInstrumentationEntryPoint(); 1303 } else { 1304 code = called->GetEntryPointFromQuickCompiledCode(); 1305 } 1306 } else if (called_class->IsInitializing()) { 1307 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1308 // If we are single-stepping or the called method is deoptimized (by a 1309 // breakpoint, for example), then we have to execute the called method 1310 // with the interpreter. 1311 code = GetQuickToInterpreterBridge(); 1312 } else if (invoke_type == kStatic) { 1313 // Class is still initializing, go to oat and grab code (trampoline must be left in place 1314 // until class is initialized to stop races between threads). 
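        // Fetch the AOT code directly from the oat file; the method's own entry point still
        // points at the resolution trampoline until the class is fully initialized.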
1315 code = linker->GetQuickOatCodeFor(called); 1316 } else { 1317 // No trampoline for non-static methods. 1318 code = called->GetEntryPointFromQuickCompiledCode(); 1319 } 1320 } else { 1321 DCHECK(called_class->IsErroneous()); 1322 } 1323 } 1324 CHECK_EQ(code == nullptr, self->IsExceptionPending()); 1325 // Fixup any locally saved objects may have moved during a GC. 1326 visitor.FixupReferences(); 1327 // Place called method in callee-save frame to be placed as first argument to quick method. 1328 *sp = called; 1329 1330 return code; 1331} 1332 1333/* 1334 * This class uses a couple of observations to unite the different calling conventions through 1335 * a few constants. 1336 * 1337 * 1) Number of registers used for passing is normally even, so counting down has no penalty for 1338 * possible alignment. 1339 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point 1340 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote 1341 * when we have to split things 1342 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats 1343 * and we can use Int handling directly. 1344 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code 1345 * necessary when widening. Also, widening of Ints will take place implicitly, and the 1346 * extension should be compatible with Aarch64, which mandates copying the available bits 1347 * into LSB and leaving the rest unspecified. 1348 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on 1349 * the stack. 1350 * 6) There is only little endian. 1351 * 1352 * 1353 * Actual work is supposed to be done in a delegate of the template type. The interface is as 1354 * follows: 1355 * 1356 * void PushGpr(uintptr_t): Add a value for the next GPR 1357 * 1358 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need 1359 * padding, that is, think the architecture is 32b and aligns 64b. 1360 * 1361 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to 1362 * split this if necessary. The current state will have aligned, if 1363 * necessary. 1364 * 1365 * void PushStack(uintptr_t): Push a value to the stack. 1366 * 1367 * uintptr_t PushHandleScope(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr, 1368 * as this might be important for null initialization. 1369 * Must return the jobject, that is, the reference to the 1370 * entry in the HandleScope (nullptr if necessary). 1371 * 1372 */ 1373template<class T> class BuildNativeCallFrameStateMachine { 1374 public: 1375#if defined(__arm__) 1376 // TODO: These are all dummy values! 1377 static constexpr bool kNativeSoftFloatAbi = true; 1378 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3 1379 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1380 1381 static constexpr size_t kRegistersNeededForLong = 2; 1382 static constexpr size_t kRegistersNeededForDouble = 2; 1383 static constexpr bool kMultiRegistersAligned = true; 1384 static constexpr bool kMultiFPRegistersWidened = false; 1385 static constexpr bool kMultiGPRegistersWidened = false; 1386 static constexpr bool kAlignLongOnStack = true; 1387 static constexpr bool kAlignDoubleOnStack = true; 1388#elif defined(__aarch64__) 1389 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 
1390 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs. 1391 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs. 1392 1393 static constexpr size_t kRegistersNeededForLong = 1; 1394 static constexpr size_t kRegistersNeededForDouble = 1; 1395 static constexpr bool kMultiRegistersAligned = false; 1396 static constexpr bool kMultiFPRegistersWidened = false; 1397 static constexpr bool kMultiGPRegistersWidened = false; 1398 static constexpr bool kAlignLongOnStack = false; 1399 static constexpr bool kAlignDoubleOnStack = false; 1400#elif defined(__mips__) && !defined(__LP64__) 1401 static constexpr bool kNativeSoftFloatAbi = true; // FP args are prepared as on soft-float; the trampoline moves them into FPRs (see the MIPS32 note below). 1402 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs. 1403 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1404 1405 static constexpr size_t kRegistersNeededForLong = 2; 1406 static constexpr size_t kRegistersNeededForDouble = 2; 1407 static constexpr bool kMultiRegistersAligned = true; 1408 static constexpr bool kMultiFPRegistersWidened = true; 1409 static constexpr bool kMultiGPRegistersWidened = false; 1410 static constexpr bool kAlignLongOnStack = true; 1411 static constexpr bool kAlignDoubleOnStack = true; 1412#elif defined(__mips__) && defined(__LP64__) 1413 // Let the code prepare GPRs only and we will load the FPRs with same data. 1414 static constexpr bool kNativeSoftFloatAbi = true; 1415 static constexpr size_t kNumNativeGprArgs = 8; 1416 static constexpr size_t kNumNativeFprArgs = 0; 1417 1418 static constexpr size_t kRegistersNeededForLong = 1; 1419 static constexpr size_t kRegistersNeededForDouble = 1; 1420 static constexpr bool kMultiRegistersAligned = false; 1421 static constexpr bool kMultiFPRegistersWidened = false; 1422 static constexpr bool kMultiGPRegistersWidened = true; 1423 static constexpr bool kAlignLongOnStack = false; 1424 static constexpr bool kAlignDoubleOnStack = false; 1425#elif defined(__i386__) 1426 // TODO: Check these! 1427 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp 1428 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs; everything goes on the stack. 1429 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1430 1431 static constexpr size_t kRegistersNeededForLong = 2; 1432 static constexpr size_t kRegistersNeededForDouble = 2; 1433 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways 1434 static constexpr bool kMultiFPRegistersWidened = false; 1435 static constexpr bool kMultiGPRegistersWidened = false; 1436 static constexpr bool kAlignLongOnStack = false; 1437 static constexpr bool kAlignDoubleOnStack = false; 1438#elif defined(__x86_64__) 1439 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 1440 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs. 1441 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1442 1443 static constexpr size_t kRegistersNeededForLong = 1; 1444 static constexpr size_t kRegistersNeededForDouble = 1; 1445 static constexpr bool kMultiRegistersAligned = false; 1446 static constexpr bool kMultiFPRegistersWidened = false; 1447 static constexpr bool kMultiGPRegistersWidened = false; 1448 static constexpr bool kAlignLongOnStack = false; 1449 static constexpr bool kAlignDoubleOnStack = false; 1450#else 1451#error "Unsupported architecture" 1452#endif 1453 1454 public: 1455 explicit BuildNativeCallFrameStateMachine(T* delegate) 1456 : gpr_index_(kNumNativeGprArgs), 1457 fpr_index_(kNumNativeFprArgs), 1458 stack_entries_(0), 1459 delegate_(delegate) { 1460 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff 1461 // the next register is even; counting down is just to make the compiler happy... 1462 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); 1463 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); 1464 } 1465 1466 virtual ~BuildNativeCallFrameStateMachine() {} 1467 1468 bool HavePointerGpr() const { 1469 return gpr_index_ > 0; 1470 } 1471 1472 void AdvancePointer(const void* val) { 1473 if (HavePointerGpr()) { 1474 gpr_index_--; 1475 PushGpr(reinterpret_cast<uintptr_t>(val)); 1476 } else { 1477 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b 1478 PushStack(reinterpret_cast<uintptr_t>(val)); 1479 gpr_index_ = 0; 1480 } 1481 } 1482 1483 bool HaveHandleScopeGpr() const { 1484 return gpr_index_ > 0; 1485 } 1486 1487 void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) { 1488 uintptr_t handle = PushHandle(ptr); 1489 if (HaveHandleScopeGpr()) { 1490 gpr_index_--; 1491 PushGpr(handle); 1492 } else { 1493 stack_entries_++; 1494 PushStack(handle); 1495 gpr_index_ = 0; 1496 } 1497 } 1498 1499 bool HaveIntGpr() const { 1500 return gpr_index_ > 0; 1501 } 1502 1503 void AdvanceInt(uint32_t val) { 1504 if (HaveIntGpr()) { 1505 gpr_index_--; 1506 if (kMultiGPRegistersWidened) { 1507 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1508 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1509 } else { 1510 PushGpr(val); 1511 } 1512 } else { 1513 stack_entries_++; 1514 if (kMultiGPRegistersWidened) { 1515 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1516 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1517 } else { 1518 PushStack(val); 1519 } 1520 gpr_index_ = 0; 1521 } 1522 } 1523 1524 bool HaveLongGpr() const { 1525 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 
1 : 0); 1526 } 1527 1528 bool LongGprNeedsPadding() const { 1529 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1530 kAlignLongOnStack && // and when it needs alignment 1531 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1532 } 1533 1534 bool LongStackNeedsPadding() const { 1535 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1536 kAlignLongOnStack && // and when it needs 8B alignment 1537 (stack_entries_ & 1) == 1; // counter is odd 1538 } 1539 1540 void AdvanceLong(uint64_t val) { 1541 if (HaveLongGpr()) { 1542 if (LongGprNeedsPadding()) { 1543 PushGpr(0); 1544 gpr_index_--; 1545 } 1546 if (kRegistersNeededForLong == 1) { 1547 PushGpr(static_cast<uintptr_t>(val)); 1548 } else { 1549 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1550 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1551 } 1552 gpr_index_ -= kRegistersNeededForLong; 1553 } else { 1554 if (LongStackNeedsPadding()) { 1555 PushStack(0); 1556 stack_entries_++; 1557 } 1558 if (kRegistersNeededForLong == 1) { 1559 PushStack(static_cast<uintptr_t>(val)); 1560 stack_entries_++; 1561 } else { 1562 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1563 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1564 stack_entries_ += 2; 1565 } 1566 gpr_index_ = 0; 1567 } 1568 } 1569 1570 bool HaveFloatFpr() const { 1571 return fpr_index_ > 0; 1572 } 1573 1574 void AdvanceFloat(float val) { 1575 if (kNativeSoftFloatAbi) { 1576 AdvanceInt(bit_cast<uint32_t, float>(val)); 1577 } else { 1578 if (HaveFloatFpr()) { 1579 fpr_index_--; 1580 if (kRegistersNeededForDouble == 1) { 1581 if (kMultiFPRegistersWidened) { 1582 PushFpr8(bit_cast<uint64_t, double>(val)); 1583 } else { 1584 // No widening, just use the bits. 1585 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1586 } 1587 } else { 1588 PushFpr4(val); 1589 } 1590 } else { 1591 stack_entries_++; 1592 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1593 // Need to widen before storing: Note the "double" in the template instantiation. 1594 // Note: We need to jump through those hoops to make the compiler happy. 1595 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1596 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1597 } else { 1598 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1599 } 1600 fpr_index_ = 0; 1601 } 1602 } 1603 } 1604 1605 bool HaveDoubleFpr() const { 1606 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 
1 : 0); 1607 } 1608 1609 bool DoubleFprNeedsPadding() const { 1610 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1611 kAlignDoubleOnStack && // and when it needs alignment 1612 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1613 } 1614 1615 bool DoubleStackNeedsPadding() const { 1616 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1617 kAlignDoubleOnStack && // and when it needs 8B alignment 1618 (stack_entries_ & 1) == 1; // counter is odd 1619 } 1620 1621 void AdvanceDouble(uint64_t val) { 1622 if (kNativeSoftFloatAbi) { 1623 AdvanceLong(val); 1624 } else { 1625 if (HaveDoubleFpr()) { 1626 if (DoubleFprNeedsPadding()) { 1627 PushFpr4(0); 1628 fpr_index_--; 1629 } 1630 PushFpr8(val); 1631 fpr_index_ -= kRegistersNeededForDouble; 1632 } else { 1633 if (DoubleStackNeedsPadding()) { 1634 PushStack(0); 1635 stack_entries_++; 1636 } 1637 if (kRegistersNeededForDouble == 1) { 1638 PushStack(static_cast<uintptr_t>(val)); 1639 stack_entries_++; 1640 } else { 1641 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1642 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1643 stack_entries_ += 2; 1644 } 1645 fpr_index_ = 0; 1646 } 1647 } 1648 } 1649 1650 uint32_t GetStackEntries() const { 1651 return stack_entries_; 1652 } 1653 1654 uint32_t GetNumberOfUsedGprs() const { 1655 return kNumNativeGprArgs - gpr_index_; 1656 } 1657 1658 uint32_t GetNumberOfUsedFprs() const { 1659 return kNumNativeFprArgs - fpr_index_; 1660 } 1661 1662 private: 1663 void PushGpr(uintptr_t val) { 1664 delegate_->PushGpr(val); 1665 } 1666 void PushFpr4(float val) { 1667 delegate_->PushFpr4(val); 1668 } 1669 void PushFpr8(uint64_t val) { 1670 delegate_->PushFpr8(val); 1671 } 1672 void PushStack(uintptr_t val) { 1673 delegate_->PushStack(val); 1674 } 1675 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { 1676 return delegate_->PushHandle(ref); 1677 } 1678 1679 uint32_t gpr_index_; // Number of free GPRs 1680 uint32_t fpr_index_; // Number of free FPRs 1681 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1682 // extended 1683 T* const delegate_; // What Push implementation gets called 1684}; 1685 1686// Computes the sizes of register stacks and call stack area. Handling of references can be extended 1687// in subclasses. 1688// 1689// To handle native pointers, use "L" in the shorty for an object reference, which simulates 1690// them with handles. 1691class ComputeNativeCallFrameSize { 1692 public: 1693 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1694 1695 virtual ~ComputeNativeCallFrameSize() {} 1696 1697 uint32_t GetStackSize() const { 1698 return num_stack_entries_ * sizeof(uintptr_t); 1699 } 1700 1701 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1702 sp8 -= GetStackSize(); 1703 // Align by kStackAlignment. 
1704 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1705 return sp8; 1706 } 1707 1708 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) 1709 const { 1710 // Assumption is OK right now, as we have soft-float arm 1711 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs; 1712 sp8 -= fregs * sizeof(uintptr_t); 1713 *start_fpr = reinterpret_cast<uint32_t*>(sp8); 1714 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs; 1715 sp8 -= iregs * sizeof(uintptr_t); 1716 *start_gpr = reinterpret_cast<uintptr_t*>(sp8); 1717 return sp8; 1718 } 1719 1720 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr, 1721 uint32_t** start_fpr) const { 1722 // Native call stack. 1723 sp8 = LayoutCallStack(sp8); 1724 *start_stack = reinterpret_cast<uintptr_t*>(sp8); 1725 1726 // Put fprs and gprs below. 1727 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr); 1728 1729 // Return the new bottom. 1730 return sp8; 1731 } 1732 1733 virtual void WalkHeader( 1734 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED) 1735 REQUIRES_SHARED(Locks::mutator_lock_) { 1736 } 1737 1738 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) { 1739 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); 1740 1741 WalkHeader(&sm); 1742 1743 for (uint32_t i = 1; i < shorty_len; ++i) { 1744 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]); 1745 switch (cur_type_) { 1746 case Primitive::kPrimNot: 1747 // TODO: fix abuse of mirror types. 1748 sm.AdvanceHandleScope( 1749 reinterpret_cast<mirror::Object*>(0x12345678)); 1750 break; 1751 1752 case Primitive::kPrimBoolean: 1753 case Primitive::kPrimByte: 1754 case Primitive::kPrimChar: 1755 case Primitive::kPrimShort: 1756 case Primitive::kPrimInt: 1757 sm.AdvanceInt(0); 1758 break; 1759 case Primitive::kPrimFloat: 1760 sm.AdvanceFloat(0); 1761 break; 1762 case Primitive::kPrimDouble: 1763 sm.AdvanceDouble(0); 1764 break; 1765 case Primitive::kPrimLong: 1766 sm.AdvanceLong(0); 1767 break; 1768 default: 1769 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty; 1770 UNREACHABLE(); 1771 } 1772 } 1773 1774 num_stack_entries_ = sm.GetStackEntries(); 1775 } 1776 1777 void PushGpr(uintptr_t /* val */) { 1778 // not optimizing registers, yet 1779 } 1780 1781 void PushFpr4(float /* val */) { 1782 // not optimizing registers, yet 1783 } 1784 1785 void PushFpr8(uint64_t /* val */) { 1786 // not optimizing registers, yet 1787 } 1788 1789 void PushStack(uintptr_t /* val */) { 1790 // counting is already done in the superclass 1791 } 1792 1793 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) { 1794 return reinterpret_cast<uintptr_t>(nullptr); 1795 } 1796 1797 protected: 1798 uint32_t num_stack_entries_; 1799}; 1800 1801class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { 1802 public: 1803 explicit ComputeGenericJniFrameSize(bool critical_native) 1804 : num_handle_scope_references_(0), critical_native_(critical_native) {} 1805 1806 // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs 1807 // is at *m = sp. Will update to point to the bottom of the save frame. 1808 // 1809 // Note: assumes ComputeAll() has been run before. 
1810 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1811 REQUIRES_SHARED(Locks::mutator_lock_) { 1812 ArtMethod* method = **m; 1813 1814 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 1815 1816 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1817 1818 // First, fix up the layout of the callee-save frame. 1819 // We have to squeeze in the HandleScope, and relocate the method pointer. 1820 1821 // "Free" the slot for the method. 1822 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 1823 1824 // Under the callee saves put handle scope and new method stack reference. 1825 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 1826 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 1827 1828 sp8 -= scope_and_method; 1829 // Align by kStackAlignment. 1830 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1831 1832 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 1833 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 1834 num_handle_scope_references_); 1835 1836 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 1837 uint8_t* method_pointer = sp8; 1838 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 1839 *new_method_ref = method; 1840 *m = new_method_ref; 1841 } 1842 1843 // Adds space for the cookie. Note: may leave stack unaligned. 1844 void LayoutCookie(uint8_t** sp) const { 1845 // Reference cookie and padding 1846 *sp -= 8; 1847 } 1848 1849 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 1850 // Returns the new bottom. Note: this may be unaligned. 1851 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1852 REQUIRES_SHARED(Locks::mutator_lock_) { 1853 // First, fix up the layout of the callee-save frame. 1854 // We have to squeeze in the HandleScope, and relocate the method pointer. 1855 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 1856 1857 // The bottom of the callee-save frame is now where the method is, *m. 1858 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 1859 1860 // Add space for cookie. 1861 LayoutCookie(&sp8); 1862 1863 return sp8; 1864 } 1865 1866 // WARNING: After this, *sp won't be pointing to the method anymore! 1867 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 1868 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 1869 uint32_t** start_fpr) 1870 REQUIRES_SHARED(Locks::mutator_lock_) { 1871 Walk(shorty, shorty_len); 1872 1873 // JNI part. 1874 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 1875 1876 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 1877 1878 // Return the new bottom. 1879 return sp8; 1880 } 1881 1882 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE; 1883 1884 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
1885 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE 1886 REQUIRES_SHARED(Locks::mutator_lock_); 1887 1888 private: 1889 uint32_t num_handle_scope_references_; 1890 const bool critical_native_; 1891}; 1892 1893uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) { 1894 num_handle_scope_references_++; 1895 return reinterpret_cast<uintptr_t>(nullptr); 1896} 1897 1898void ComputeGenericJniFrameSize::WalkHeader( 1899 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) { 1900 // First 2 parameters are always excluded for @CriticalNative. 1901 if (UNLIKELY(critical_native_)) { 1902 return; 1903 } 1904 1905 // JNIEnv 1906 sm->AdvancePointer(nullptr); 1907 1908 // Class object or this as first argument 1909 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678)); 1910} 1911 1912// Class to push values to three separate regions. Used to fill the native call part. Adheres to 1913// the template requirements of BuildNativeCallFrameStateMachine. 1914class FillNativeCall { 1915 public: 1916 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) : 1917 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {} 1918 1919 virtual ~FillNativeCall() {} 1920 1921 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) { 1922 cur_gpr_reg_ = gpr_regs; 1923 cur_fpr_reg_ = fpr_regs; 1924 cur_stack_arg_ = stack_args; 1925 } 1926 1927 void PushGpr(uintptr_t val) { 1928 *cur_gpr_reg_ = val; 1929 cur_gpr_reg_++; 1930 } 1931 1932 void PushFpr4(float val) { 1933 *cur_fpr_reg_ = val; 1934 cur_fpr_reg_++; 1935 } 1936 1937 void PushFpr8(uint64_t val) { 1938 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_); 1939 *tmp = val; 1940 cur_fpr_reg_ += 2; 1941 } 1942 1943 void PushStack(uintptr_t val) { 1944 *cur_stack_arg_ = val; 1945 cur_stack_arg_++; 1946 } 1947 1948 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) { 1949 LOG(FATAL) << "(Non-JNI) Native call does not use handles."; 1950 UNREACHABLE(); 1951 } 1952 1953 private: 1954 uintptr_t* cur_gpr_reg_; 1955 uint32_t* cur_fpr_reg_; 1956 uintptr_t* cur_stack_arg_; 1957}; 1958 1959// Visits arguments on the stack placing them into a region lower down the stack for the benefit 1960// of transitioning into native code. 1961class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { 1962 public: 1963 BuildGenericJniFrameVisitor(Thread* self, 1964 bool is_static, 1965 bool critical_native, 1966 const char* shorty, 1967 uint32_t shorty_len, 1968 ArtMethod*** sp) 1969 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 1970 jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native), 1971 sm_(&jni_call_) { 1972 ComputeGenericJniFrameSize fsc(critical_native); 1973 uintptr_t* start_gpr_reg; 1974 uint32_t* start_fpr_reg; 1975 uintptr_t* start_stack_arg; 1976 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 1977 &handle_scope_, 1978 &start_stack_arg, 1979 &start_gpr_reg, &start_fpr_reg); 1980 1981 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 1982 1983 // First 2 parameters are always excluded for CriticalNative methods. 1984 if (LIKELY(!critical_native)) { 1985 // The JNI environment is always the first argument. 1986 sm_.AdvancePointer(self->GetJniEnv()); 1987 1988 if (is_static) { 1989 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); 1990 } // else "this" reference is already handled by QuickArgumentVisitor.
1991 } 1992 } 1993 1994 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 1995 1996 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); 1997 1998 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 1999 return handle_scope_->GetHandle(0).GetReference(); 2000 } 2001 2002 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) { 2003 return handle_scope_->GetHandle(0).ToJObject(); 2004 } 2005 2006 void* GetBottomOfUsedArea() const { 2007 return bottom_of_used_area_; 2008 } 2009 2010 private: 2011 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 2012 class FillJniCall FINAL : public FillNativeCall { 2013 public: 2014 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 2015 HandleScope* handle_scope, bool critical_native) 2016 : FillNativeCall(gpr_regs, fpr_regs, stack_args), 2017 handle_scope_(handle_scope), 2018 cur_entry_(0), 2019 critical_native_(critical_native) {} 2020 2021 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); 2022 2023 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 2024 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 2025 handle_scope_ = scope; 2026 cur_entry_ = 0U; 2027 } 2028 2029 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) { 2030 // Initialize padding entries. 2031 size_t expected_slots = handle_scope_->NumberOfReferences(); 2032 while (cur_entry_ < expected_slots) { 2033 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr); 2034 } 2035 2036 if (!critical_native_) { 2037 // Non-critical natives have at least the self class (jclass) or this (jobject). 2038 DCHECK_NE(cur_entry_, 0U); 2039 } 2040 } 2041 2042 bool CriticalNative() const { 2043 return critical_native_; 2044 } 2045 2046 private: 2047 HandleScope* handle_scope_; 2048 size_t cur_entry_; 2049 const bool critical_native_; 2050 }; 2051 2052 HandleScope* handle_scope_; 2053 FillJniCall jni_call_; 2054 void* bottom_of_used_area_; 2055 2056 BuildNativeCallFrameStateMachine<FillJniCall> sm_; 2057 2058 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor); 2059}; 2060 2061uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) { 2062 uintptr_t tmp; 2063 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_); 2064 h.Assign(ref); 2065 tmp = reinterpret_cast<uintptr_t>(h.ToJObject()); 2066 cur_entry_++; 2067 return tmp; 2068} 2069 2070void BuildGenericJniFrameVisitor::Visit() { 2071 Primitive::Type type = GetParamPrimitiveType(); 2072 switch (type) { 2073 case Primitive::kPrimLong: { 2074 jlong long_arg; 2075 if (IsSplitLongOrDouble()) { 2076 long_arg = ReadSplitLongParam(); 2077 } else { 2078 long_arg = *reinterpret_cast<jlong*>(GetParamAddress()); 2079 } 2080 sm_.AdvanceLong(long_arg); 2081 break; 2082 } 2083 case Primitive::kPrimDouble: { 2084 uint64_t double_arg; 2085 if (IsSplitLongOrDouble()) { 2086 // Read into union so that we don't cast to a double.
2087 double_arg = ReadSplitLongParam(); 2088 } else { 2089 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress()); 2090 } 2091 sm_.AdvanceDouble(double_arg); 2092 break; 2093 } 2094 case Primitive::kPrimNot: { 2095 StackReference<mirror::Object>* stack_ref = 2096 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 2097 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr()); 2098 break; 2099 } 2100 case Primitive::kPrimFloat: 2101 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress())); 2102 break; 2103 case Primitive::kPrimBoolean: // Fall-through. 2104 case Primitive::kPrimByte: // Fall-through. 2105 case Primitive::kPrimChar: // Fall-through. 2106 case Primitive::kPrimShort: // Fall-through. 2107 case Primitive::kPrimInt: // Fall-through. 2108 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress())); 2109 break; 2110 case Primitive::kPrimVoid: 2111 LOG(FATAL) << "UNREACHABLE"; 2112 UNREACHABLE(); 2113 } 2114} 2115 2116void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) { 2117 // Clear out rest of the scope. 2118 jni_call_.ResetRemainingScopeSlots(); 2119 if (!jni_call_.CriticalNative()) { 2120 // Install HandleScope. 2121 self->PushHandleScope(handle_scope_); 2122 } 2123} 2124 2125#if defined(__arm__) || defined(__aarch64__) 2126extern "C" const void* artFindNativeMethod(); 2127#else 2128extern "C" const void* artFindNativeMethod(Thread* self); 2129#endif 2130 2131static uint64_t artQuickGenericJniEndJNIRef(Thread* self, 2132 uint32_t cookie, 2133 bool fast_native ATTRIBUTE_UNUSED, 2134 jobject l, 2135 jobject lock) { 2136 // TODO: add entrypoints for @FastNative returning objects. 2137 if (lock != nullptr) { 2138 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self)); 2139 } else { 2140 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self)); 2141 } 2142} 2143 2144static void artQuickGenericJniEndJNINonRef(Thread* self, 2145 uint32_t cookie, 2146 bool fast_native, 2147 jobject lock) { 2148 if (lock != nullptr) { 2149 JniMethodEndSynchronized(cookie, lock, self); 2150 // Ignore "fast_native" here because synchronized functions aren't very fast. 2151 } else { 2152 if (UNLIKELY(fast_native)) { 2153 JniMethodFastEnd(cookie, self); 2154 } else { 2155 JniMethodEnd(cookie, self); 2156 } 2157 } 2158} 2159 2160/* 2161 * Initializes an alloca region assumed to be directly below sp for a native call: 2162 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers. 2163 * The final element on the stack is a pointer to the native code. 2164 * 2165 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it. 2166 * We need to fix this, as the handle scope needs to go into the callee-save frame. 2167 * 2168 * The return of this function denotes: 2169 * 1) How many bytes of the alloca can be released, if the value is non-negative. 2170 * 2) An error, if the value is negative. 2171 */ 2172extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) 2173 REQUIRES_SHARED(Locks::mutator_lock_) { 2174 // Note: We cannot walk the stack properly until fixed up below. 
2175 ArtMethod* called = *sp; 2176 DCHECK(called->IsNative()) << called->PrettyMethod(true); 2177 uint32_t shorty_len = 0; 2178 const char* shorty = called->GetShorty(&shorty_len); 2179 bool critical_native = called->IsCriticalNative(); 2180 bool fast_native = called->IsFastNative(); 2181 bool normal_native = !critical_native && !fast_native; 2182 2183 // Run the visitor and update sp. 2184 BuildGenericJniFrameVisitor visitor(self, 2185 called->IsStatic(), 2186 critical_native, 2187 shorty, 2188 shorty_len, 2189 &sp); 2190 { 2191 ScopedAssertNoThreadSuspension sants(__FUNCTION__); 2192 visitor.VisitArguments(); 2193 // FinalizeHandleScope pushes the handle scope on the thread. 2194 visitor.FinalizeHandleScope(self); 2195 } 2196 2197 // Fix up managed-stack things in Thread. After this we can walk the stack. 2198 self->SetTopOfStack(sp); 2199 2200 self->VerifyStack(); 2201 2202 uint32_t cookie; 2203 uint32_t* sp32; 2204 // Skip calling JniMethodStart for @CriticalNative. 2205 if (LIKELY(!critical_native)) { 2206 // Start JNI, save the cookie. 2207 if (called->IsSynchronized()) { 2208 DCHECK(normal_native) << " @FastNative and synchronize is not supported"; 2209 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 2210 if (self->IsExceptionPending()) { 2211 self->PopHandleScope(); 2212 // A negative value denotes an error. 2213 return GetTwoWordFailureValue(); 2214 } 2215 } else { 2216 if (fast_native) { 2217 cookie = JniMethodFastStart(self); 2218 } else { 2219 DCHECK(normal_native); 2220 cookie = JniMethodStart(self); 2221 } 2222 } 2223 sp32 = reinterpret_cast<uint32_t*>(sp); 2224 *(sp32 - 1) = cookie; 2225 } 2226 2227 // Retrieve the stored native code. 2228 void const* nativeCode = called->GetEntryPointFromJni(); 2229 2230 // There are two cases for the content of nativeCode: 2231 // 1) Pointer to the native function. 2232 // 2) Pointer to the trampoline for native code binding. 2233 // In the second case, we need to execute the binding and continue with the actual native function 2234 // pointer. 2235 DCHECK(nativeCode != nullptr); 2236 if (nativeCode == GetJniDlsymLookupStub()) { 2237#if defined(__arm__) || defined(__aarch64__) 2238 nativeCode = artFindNativeMethod(); 2239#else 2240 nativeCode = artFindNativeMethod(self); 2241#endif 2242 2243 if (nativeCode == nullptr) { 2244 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 2245 2246 // @CriticalNative calls do not need to call back into JniMethodEnd. 2247 if (LIKELY(!critical_native)) { 2248 // End JNI, as the assembly will move to deliver the exception. 2249 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 2250 if (shorty[0] == 'L') { 2251 artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock); 2252 } else { 2253 artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock); 2254 } 2255 } 2256 2257 return GetTwoWordFailureValue(); 2258 } 2259 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 2260 } 2261 2262#if defined(__mips__) && !defined(__LP64__) 2263 // On MIPS32 if the first two arguments are floating-point, we need to know their types 2264 // so that art_quick_generic_jni_trampoline can correctly extract them from the stack 2265 // and load into floating-point registers. 
2266 // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU 2267 // view): 2268 // (1) 2269 // | DOUBLE | DOUBLE | other args, if any 2270 // | F12 | F13 | F14 | F15 | 2271 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2272 // (2) 2273 // | DOUBLE | FLOAT | (PAD) | other args, if any 2274 // | F12 | F13 | F14 | | 2275 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2276 // (3) 2277 // | FLOAT | (PAD) | DOUBLE | other args, if any 2278 // | F12 | | F14 | F15 | 2279 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2280 // (4) 2281 // | FLOAT | FLOAT | other args, if any 2282 // | F12 | F14 | 2283 // | SP+0 | SP+4 | SP+8 2284 // As you can see, only the last case (4) is special. In all others we can just 2285 // load F12/F13 and F14/F15 in the same manner. 2286 // Set bit 0 of the native code address to 1 in this case (valid code addresses 2287 // are always a multiple of 4 on MIPS32, so we have 2 spare bits available). 2288 if (nativeCode != nullptr && 2289 shorty != nullptr && 2290 shorty_len >= 3 && 2291 shorty[1] == 'F' && 2292 shorty[2] == 'F') { 2293 nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1); 2294 } 2295#endif 2296 2297 // Return native code addr(lo) and bottom of alloca address(hi). 2298 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 2299 reinterpret_cast<uintptr_t>(nativeCode)); 2300} 2301 2302// Defined in quick_jni_entrypoints.cc. 2303extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie, 2304 jvalue result, uint64_t result_f, ArtMethod* called, 2305 HandleScope* handle_scope); 2306/* 2307 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 2308 * unlocking. 2309 */ 2310extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, 2311 jvalue result, 2312 uint64_t result_f) { 2313 // We're here just back from a native call. We don't have the shared mutator lock at this point 2314 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing 2315 // anything that requires a mutator lock before that would cause problems as GC may have the 2316 // exclusive mutator lock and may be moving objects, etc. 2317 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); 2318 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 2319 ArtMethod* called = *sp; 2320 uint32_t cookie = *(sp32 - 1); 2321 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp)); 2322 return GenericJniMethodEnd(self, cookie, result, result_f, called, table); 2323} 2324 2325// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 2326// for the method pointer. 2327// 2328// It is valid to use this, as at the usage points here (returns from C functions) we are assuming 2329// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations). 
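// Illustrative sketch (kept out of the build): the packing convention described above, assuming
// a 32-bit configuration where the two return words are modelled as a single uint64_t. The names
// ExampleTwoWord, ExamplePackSuccess and ExamplePackFailure are hypothetical, not runtime APIs.
#if 0
#include <cstdint>

using ExampleTwoWord = uint64_t;

// hi = code address, lo = method pointer, matching the comment above.
static inline ExampleTwoWord ExamplePackSuccess(uint32_t code, uint32_t method) {
  return (static_cast<uint64_t>(code) << 32) | static_cast<uint64_t>(method);
}

// An all-zero value leaves the code address null, which corresponds to the
// failure/exception-pending case checked against nullptr elsewhere in this file.
static inline ExampleTwoWord ExamplePackFailure() {
  return 0;
}
#endif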
2330 2331template <InvokeType type, bool access_check> 2332static TwoWordReturn artInvokeCommon(uint32_t method_idx, 2333 ObjPtr<mirror::Object> this_object, 2334 Thread* self, 2335 ArtMethod** sp) { 2336 ScopedQuickEntrypointChecks sqec(self); 2337 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2338 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2339 ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method); 2340 if (UNLIKELY(method == nullptr)) { 2341 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); 2342 uint32_t shorty_len; 2343 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 2344 { 2345 // Remember the args in case a GC happens in FindMethodFromCode. 2346 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2347 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 2348 visitor.VisitArguments(); 2349 method = FindMethodFromCode<type, access_check>(method_idx, 2350 &this_object, 2351 caller_method, 2352 self); 2353 visitor.FixupReferences(); 2354 } 2355 2356 if (UNLIKELY(method == nullptr)) { 2357 CHECK(self->IsExceptionPending()); 2358 return GetTwoWordFailureValue(); // Failure. 2359 } 2360 } 2361 DCHECK(!self->IsExceptionPending()); 2362 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2363 2364 // When we return, the caller will branch to this address, so it had better not be 0! 2365 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2366 << " location: " 2367 << method->GetDexFile()->GetLocation(); 2368 2369 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2370 reinterpret_cast<uintptr_t>(method)); 2371} 2372 2373// Explicit artInvokeCommon template function declarations to please analysis tool. 
2374#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ 2375 template REQUIRES_SHARED(Locks::mutator_lock_) \ 2376 TwoWordReturn artInvokeCommon<type, access_check>( \ 2377 uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp) 2378 2379EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); 2380EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); 2381EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false); 2382EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true); 2383EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false); 2384EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true); 2385EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false); 2386EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true); 2387EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false); 2388EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); 2389#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL 2390 2391// See comments in runtime_support_asm.S 2392extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( 2393 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2394 REQUIRES_SHARED(Locks::mutator_lock_) { 2395 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); 2396} 2397 2398extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( 2399 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2400 REQUIRES_SHARED(Locks::mutator_lock_) { 2401 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); 2402} 2403 2404extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( 2405 uint32_t method_idx, 2406 mirror::Object* this_object ATTRIBUTE_UNUSED, 2407 Thread* self, 2408 ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 2409 // For static, this_object is not required and may be random garbage. Don't pass it down so that 2410 // it doesn't trip the ObjPtr alignment check. 2411 return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp); 2412} 2413 2414extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( 2415 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2416 REQUIRES_SHARED(Locks::mutator_lock_) { 2417 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); 2418} 2419 2420extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( 2421 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2422 REQUIRES_SHARED(Locks::mutator_lock_) { 2423 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); 2424} 2425 2426// Helper function for art_quick_imt_conflict_trampoline to look up the interface method. 2427extern "C" ArtMethod* artLookupResolvedMethod(uint32_t method_index, ArtMethod* referrer) 2428 REQUIRES_SHARED(Locks::mutator_lock_) { 2429 ScopedAssertNoThreadSuspension ants(__FUNCTION__); 2430 DCHECK(!referrer->IsProxyMethod()); 2431 ArtMethod* result = Runtime::Current()->GetClassLinker()->LookupResolvedMethod( 2432 method_index, referrer->GetDexCache(), referrer->GetClassLoader()); 2433 DCHECK(result == nullptr || 2434 result->GetDeclaringClass()->IsInterface() || 2435 result->GetDeclaringClass() == 2436 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object)) 2437 << result->PrettyMethod(); 2438 return result; 2439} 2440 2441// Determine target of interface dispatch. The interface method and this object are known non-null. 2442// The interface method is the method returned by the dex cache in the conflict trampoline.
2443extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method, 2444 mirror::Object* raw_this_object, 2445 Thread* self, 2446 ArtMethod** sp) 2447 REQUIRES_SHARED(Locks::mutator_lock_) { 2448 ScopedQuickEntrypointChecks sqec(self); 2449 StackHandleScope<2> hs(self); 2450 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object); 2451 Handle<mirror::Class> cls = hs.NewHandle(this_object->GetClass()); 2452 2453 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2454 ArtMethod* method = nullptr; 2455 ImTable* imt = cls->GetImt(kRuntimePointerSize); 2456 2457 if (UNLIKELY(interface_method == nullptr)) { 2458 // The interface method is unresolved, so resolve it in the dex file of the caller. 2459 // Fetch the dex_method_idx of the target interface method from the caller. 2460 uint32_t dex_method_idx; 2461 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2462 const DexFile::CodeItem* code_item = caller_method->GetCodeItem(); 2463 DCHECK_LT(dex_pc, code_item->insns_size_in_code_units_); 2464 const Instruction& instr = code_item->InstructionAt(dex_pc); 2465 Instruction::Code instr_code = instr.Opcode(); 2466 DCHECK(instr_code == Instruction::INVOKE_INTERFACE || 2467 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 2468 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr); 2469 if (instr_code == Instruction::INVOKE_INTERFACE) { 2470 dex_method_idx = instr.VRegB_35c(); 2471 } else { 2472 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 2473 dex_method_idx = instr.VRegB_3rc(); 2474 } 2475 2476 const DexFile& dex_file = caller_method->GetDeclaringClass()->GetDexFile(); 2477 uint32_t shorty_len; 2478 const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx), 2479 &shorty_len); 2480 { 2481 // Remember the args in case a GC happens in ClassLinker::ResolveMethod(). 2482 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2483 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 2484 visitor.VisitArguments(); 2485 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2486 interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( 2487 self, dex_method_idx, caller_method, kInterface); 2488 visitor.FixupReferences(); 2489 } 2490 2491 if (UNLIKELY(interface_method == nullptr)) { 2492 CHECK(self->IsExceptionPending()); 2493 return GetTwoWordFailureValue(); // Failure. 2494 } 2495 } 2496 2497 DCHECK(!interface_method->IsRuntimeMethod()); 2498 // Look whether we have a match in the ImtConflictTable. 2499 uint32_t imt_index = ImTable::GetImtIndex(interface_method); 2500 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize); 2501 if (LIKELY(conflict_method->IsRuntimeMethod())) { 2502 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize); 2503 DCHECK(current_table != nullptr); 2504 method = current_table->Lookup(interface_method, kRuntimePointerSize); 2505 } else { 2506 // It seems we aren't really a conflict method! 
2507 if (kIsDebugBuild) { 2508 ArtMethod* m = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2509 CHECK_EQ(conflict_method, m) 2510 << interface_method->PrettyMethod() << " / " << conflict_method->PrettyMethod() << " / " 2511 << ArtMethod::PrettyMethod(m) << " / " << cls->PrettyClass(); 2512 } 2513 method = conflict_method; 2514 } 2515 if (method != nullptr) { 2516 return GetTwoWordSuccessValue( 2517 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()), 2518 reinterpret_cast<uintptr_t>(method)); 2519 } 2520 2521 // No match, use the IfTable. 2522 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2523 if (UNLIKELY(method == nullptr)) { 2524 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( 2525 interface_method, this_object.Get(), caller_method); 2526 return GetTwoWordFailureValue(); // Failure. 2527 } 2528 2529 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2530 // We create a new table with the new pair { interface_method, method }. 2531 DCHECK(conflict_method->IsRuntimeMethod()); 2532 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2533 cls.Get(), 2534 conflict_method, 2535 interface_method, 2536 method, 2537 /*force_new_conflict_method*/false); 2538 if (new_conflict_method != conflict_method) { 2539 // Update the IMT if we create a new conflict method. No fence needed here, as the 2540 // data is consistent. 2541 imt->Set(imt_index, 2542 new_conflict_method, 2543 kRuntimePointerSize); 2544 } 2545 2546 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2547 2548 // When we return, the caller will branch to this address, so it had better not be 0! 2549 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2550 << " location: " << method->GetDexFile()->GetLocation(); 2551 2552 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2553 reinterpret_cast<uintptr_t>(method)); 2554} 2555 2556// Returns shorty type so the caller can determine how to put |result| 2557// into expected registers. The shorty type is static so the compiler 2558// could call different flavors of this code path depending on the 2559// shorty type, though this would require different entry points for 2560// each type. 2561extern "C" uintptr_t artInvokePolymorphic( 2562 JValue* result, 2563 mirror::Object* raw_receiver, 2564 Thread* self, 2565 ArtMethod** sp) 2566 REQUIRES_SHARED(Locks::mutator_lock_) { 2567 ScopedQuickEntrypointChecks sqec(self); 2568 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2569 2570 // Start new JNI local reference state 2571 JNIEnvExt* env = self->GetJniEnv(); 2572 ScopedObjectAccessUnchecked soa(env); 2573 ScopedJniEnvLocalRefState env_state(env); 2574 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe."); 2575 2576 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
2577 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2578 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2579 const DexFile::CodeItem* code = caller_method->GetCodeItem(); 2580 const Instruction& inst = code->InstructionAt(dex_pc); 2581 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC || 2582 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2583 const DexFile* dex_file = caller_method->GetDexFile(); 2584 const uint32_t proto_idx = inst.VRegH(); 2585 const char* shorty = dex_file->GetShorty(proto_idx); 2586 const size_t shorty_length = strlen(shorty); 2587 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static. 2588 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa); 2589 gc_visitor.VisitArguments(); 2590 2591 // Wrap raw_receiver in a Handle for safety. 2592 StackHandleScope<3> hs(self); 2593 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver)); 2594 raw_receiver = nullptr; 2595 self->EndAssertNoThreadSuspension(old_cause); 2596 2597 // Resolve method. 2598 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 2599 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( 2600 self, inst.VRegB(), caller_method, kVirtual); 2601 2602 if (UNLIKELY(receiver_handle.IsNull())) { 2603 ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual); 2604 return static_cast<uintptr_t>('V'); 2605 } 2606 2607 // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996). 2608 DCHECK_EQ(resolved_method->GetDeclaringClass(), 2609 WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle)); 2610 2611 Handle<mirror::MethodHandle> method_handle(hs.NewHandle( 2612 ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get())))); 2613 2614 Handle<mirror::MethodType> method_type( 2615 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method))); 2616 2617 // This implies we couldn't resolve one or more types in this method handle. 2618 if (UNLIKELY(method_type.IsNull())) { 2619 CHECK(self->IsExceptionPending()); 2620 return static_cast<uintptr_t>('V'); 2621 } 2622 2623 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA()); 2624 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic); 2625 2626 // Fix references before constructing the shadow frame. 2627 gc_visitor.FixupReferences(); 2628 2629 // Construct shadow frame placing arguments consecutively from |first_arg|. 2630 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2631 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc(); 2632 const size_t first_arg = 0; 2633 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 2634 CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc); 2635 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 2636 ScopedStackedShadowFramePusher 2637 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); 2638 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, 2639 kMethodIsStatic, 2640 shorty, 2641 strlen(shorty), 2642 shadow_frame, 2643 first_arg); 2644 shadow_frame_builder.VisitArguments(); 2645 2646 // Push a transition back into managed code onto the linked list in thread. 
2647 ManagedStack fragment; 2648 self->PushManagedStackFragment(&fragment); 2649 2650 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in 2651 // consecutive order. 2652 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1); 2653 bool isExact = (jni::EncodeArtMethod(resolved_method) == 2654 WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact); 2655 bool success = false; 2656 if (isExact) { 2657 success = MethodHandleInvokeExact(self, 2658 *shadow_frame, 2659 method_handle, 2660 method_type, 2661 &operands, 2662 result); 2663 } else { 2664 success = MethodHandleInvoke(self, 2665 *shadow_frame, 2666 method_handle, 2667 method_type, 2668 &operands, 2669 result); 2670 } 2671 DCHECK(success || self->IsExceptionPending()); 2672 2673 // Pop transition record. 2674 self->PopManagedStackFragment(fragment); 2675 2676 return static_cast<uintptr_t>(shorty[0]); 2677} 2678 2679} // namespace art 2680
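// Illustrative usage sketch (hypothetical; not part of the runtime): sizing the native out-args
// area with the file-local ComputeNativeCallFrameSize defined above, for an assumed shorty "ILFD"
// (int return; reference, float and double parameters). Walk() skips index 0 (the return type)
// and feeds each remaining shorty character to BuildNativeCallFrameStateMachine; since this
// delegate's Push* hooks are no-ops, only the spill count survives:
//
//   ComputeNativeCallFrameSize frame_size;
//   frame_size.Walk("ILFD", 4);                             // Requires the mutator lock.
//   size_t out_args_bytes = frame_size.GetStackSize();      // Stack entries * sizeof(uintptr_t).
//   uint8_t* new_bottom = frame_size.LayoutCallStack(sp8);  // sp8 is a hypothetical area top.
//
// Arguments that fit in the per-architecture register budget contribute nothing to the size;
// LayoutCallStack() then carves the counted bytes out of the alloca below sp and rounds the
// result down to kStackAlignment.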