quick_trampoline_entrypoints.cc revision 94730ef9ca432b5ede81e928cffc4006911aa650
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "art_method-inl.h" 18#include "base/callee_save_type.h" 19#include "base/enums.h" 20#include "callee_save_frame.h" 21#include "common_throws.h" 22#include "debugger.h" 23#include "dex/dex_file-inl.h" 24#include "dex/dex_file_types.h" 25#include "dex/dex_instruction-inl.h" 26#include "entrypoints/entrypoint_utils-inl.h" 27#include "entrypoints/runtime_asm_entrypoints.h" 28#include "gc/accounting/card_table-inl.h" 29#include "imt_conflict_table.h" 30#include "imtable-inl.h" 31#include "index_bss_mapping.h" 32#include "instrumentation.h" 33#include "interpreter/interpreter.h" 34#include "jit/jit.h" 35#include "linear_alloc.h" 36#include "method_handles.h" 37#include "method_reference.h" 38#include "mirror/class-inl.h" 39#include "mirror/dex_cache-inl.h" 40#include "mirror/method.h" 41#include "mirror/method_handle_impl.h" 42#include "mirror/object-inl.h" 43#include "mirror/object_array-inl.h" 44#include "oat_file.h" 45#include "oat_quick_method_header.h" 46#include "quick_exception_handler.h" 47#include "runtime.h" 48#include "scoped_thread_state_change-inl.h" 49#include "stack.h" 50#include "thread-inl.h" 51#include "well_known_classes.h" 52 53namespace art { 54 55// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame. 56class QuickArgumentVisitor { 57 // Number of bytes for each out register in the caller method's frame. 58 static constexpr size_t kBytesStackArgLocation = 4; 59 // Frame size in bytes of a callee-save frame for RefsAndArgs. 60 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 61 GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs); 62#if defined(__arm__) 63 // The callee save frame is pointed to by SP. 64 // | argN | | 65 // | ... | | 66 // | arg4 | | 67 // | arg3 spill | | Caller's frame 68 // | arg2 spill | | 69 // | arg1 spill | | 70 // | Method* | --- 71 // | LR | 72 // | ... | 4x6 bytes callee saves 73 // | R3 | 74 // | R2 | 75 // | R1 | 76 // | S15 | 77 // | : | 78 // | S0 | 79 // | | 4x2 bytes padding 80 // | Method* | <- sp 81 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 82 static constexpr bool kAlignPairRegister = true; 83 static constexpr bool kQuickSoftFloatAbi = false; 84 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = true; 85 static constexpr bool kQuickSkipOddFpRegisters = false; 86 static constexpr size_t kNumQuickGprArgs = 3; 87 static constexpr size_t kNumQuickFprArgs = 16; 88 static constexpr bool kGprFprLockstep = false; 89 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 90 arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first FPR arg. 91 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 92 arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first GPR arg. 
93 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 94 arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); // Offset of return address. 95 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 96 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 97 } 98#elif defined(__aarch64__) 99 // The callee save frame is pointed to by SP. 100 // | argN | | 101 // | ... | | 102 // | arg4 | | 103 // | arg3 spill | | Caller's frame 104 // | arg2 spill | | 105 // | arg1 spill | | 106 // | Method* | --- 107 // | LR | 108 // | X29 | 109 // | : | 110 // | X20 | 111 // | X7 | 112 // | : | 113 // | X1 | 114 // | D7 | 115 // | : | 116 // | D0 | 117 // | | padding 118 // | Method* | <- sp 119 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 120 static constexpr bool kAlignPairRegister = false; 121 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 122 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 123 static constexpr bool kQuickSkipOddFpRegisters = false; 124 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 125 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 126 static constexpr bool kGprFprLockstep = false; 127 // Offset of first FPR arg. 128 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 129 arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 130 // Offset of first GPR arg. 131 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 132 arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 133 // Offset of return address. 134 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 135 arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); 136 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 137 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 138 } 139#elif defined(__mips__) && !defined(__LP64__) 140 // The callee save frame is pointed to by SP. 141 // | argN | | 142 // | ... | | 143 // | arg4 | | 144 // | arg3 spill | | Caller's frame 145 // | arg2 spill | | 146 // | arg1 spill | | 147 // | Method* | --- 148 // | RA | 149 // | ... | callee saves 150 // | T1 | arg5 151 // | T0 | arg4 152 // | A3 | arg3 153 // | A2 | arg2 154 // | A1 | arg1 155 // | F19 | 156 // | F18 | f_arg5 157 // | F17 | 158 // | F16 | f_arg4 159 // | F15 | 160 // | F14 | f_arg3 161 // | F13 | 162 // | F12 | f_arg2 163 // | F11 | 164 // | F10 | f_arg1 165 // | F9 | 166 // | F8 | f_arg0 167 // | | padding 168 // | A0/Method* | <- sp 169 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 170 static constexpr bool kAlignPairRegister = true; 171 static constexpr bool kQuickSoftFloatAbi = false; 172 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 173 static constexpr bool kQuickSkipOddFpRegisters = true; 174 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 175 static constexpr size_t kNumQuickFprArgs = 12; // 6 arguments passed in FPRs. Floats can be 176 // passed only in even numbered registers and each 177 // double occupies two registers. 178 static constexpr bool kGprFprLockstep = false; 179 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8; // Offset of first FPR arg. 180 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56; // Offset of first GPR arg. 181 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108; // Offset of return address. 
182 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 183 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 184 } 185#elif defined(__mips__) && defined(__LP64__) 186 // The callee save frame is pointed to by SP. 187 // | argN | | 188 // | ... | | 189 // | arg4 | | 190 // | arg3 spill | | Caller's frame 191 // | arg2 spill | | 192 // | arg1 spill | | 193 // | Method* | --- 194 // | RA | 195 // | ... | callee saves 196 // | A7 | arg7 197 // | A6 | arg6 198 // | A5 | arg5 199 // | A4 | arg4 200 // | A3 | arg3 201 // | A2 | arg2 202 // | A1 | arg1 203 // | F19 | f_arg7 204 // | F18 | f_arg6 205 // | F17 | f_arg5 206 // | F16 | f_arg4 207 // | F15 | f_arg3 208 // | F14 | f_arg2 209 // | F13 | f_arg1 210 // | F12 | f_arg0 211 // | | padding 212 // | A0/Method* | <- sp 213 // NOTE: for Mip64, when A0 is skipped, F12 is also skipped. 214 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 215 static constexpr bool kAlignPairRegister = false; 216 static constexpr bool kQuickSoftFloatAbi = false; 217 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 218 static constexpr bool kQuickSkipOddFpRegisters = false; 219 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 220 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs. 221 static constexpr bool kGprFprLockstep = true; 222 223 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F13). 224 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1). 225 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address. 226 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 227 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 228 } 229#elif defined(__i386__) 230 // The callee save frame is pointed to by SP. 231 // | argN | | 232 // | ... | | 233 // | arg4 | | 234 // | arg3 spill | | Caller's frame 235 // | arg2 spill | | 236 // | arg1 spill | | 237 // | Method* | --- 238 // | Return | 239 // | EBP,ESI,EDI | callee saves 240 // | EBX | arg3 241 // | EDX | arg2 242 // | ECX | arg1 243 // | XMM3 | float arg 4 244 // | XMM2 | float arg 3 245 // | XMM1 | float arg 2 246 // | XMM0 | float arg 1 247 // | EAX/Method* | <- sp 248 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 249 static constexpr bool kAlignPairRegister = false; 250 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 251 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 252 static constexpr bool kQuickSkipOddFpRegisters = false; 253 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 254 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs. 255 static constexpr bool kGprFprLockstep = false; 256 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg. 257 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg. 258 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address. 259 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 260 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 261 } 262#elif defined(__x86_64__) 263 // The callee save frame is pointed to by SP. 264 // | argN | | 265 // | ... | | 266 // | reg. 
arg spills | | Caller's frame 267 // | Method* | --- 268 // | Return | 269 // | R15 | callee save 270 // | R14 | callee save 271 // | R13 | callee save 272 // | R12 | callee save 273 // | R9 | arg5 274 // | R8 | arg4 275 // | RSI/R6 | arg1 276 // | RBP/R5 | callee save 277 // | RBX/R3 | callee save 278 // | RDX/R2 | arg2 279 // | RCX/R1 | arg3 280 // | XMM7 | float arg 8 281 // | XMM6 | float arg 7 282 // | XMM5 | float arg 6 283 // | XMM4 | float arg 5 284 // | XMM3 | float arg 4 285 // | XMM2 | float arg 3 286 // | XMM1 | float arg 2 287 // | XMM0 | float arg 1 288 // | Padding | 289 // | RDI/Method* | <- sp 290 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 291 static constexpr bool kAlignPairRegister = false; 292 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 293 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 294 static constexpr bool kQuickSkipOddFpRegisters = false; 295 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 296 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 297 static constexpr bool kGprFprLockstep = false; 298 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg. 299 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg. 300 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address. 301 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 302 switch (gpr_index) { 303 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA)); 304 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA)); 305 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA)); 306 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA)); 307 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA)); 308 default: 309 LOG(FATAL) << "Unexpected GPR index: " << gpr_index; 310 return 0; 311 } 312 } 313#else 314#error "Unsupported architecture" 315#endif 316 317 public: 318 // Special handling for proxy methods. Proxy methods are instance methods so the 319 // 'this' object is the 1st argument. They also have the same frame layout as the 320 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the 321 // 1st GPR. 322 static mirror::Object* GetProxyThisObject(ArtMethod** sp) 323 REQUIRES_SHARED(Locks::mutator_lock_) { 324 CHECK((*sp)->IsProxyMethod()); 325 CHECK_GT(kNumQuickGprArgs, 0u); 326 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 
327 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + 328 GprIndexToGprOffset(kThisGprIndex); 329 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset; 330 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); 331 } 332 333 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 334 DCHECK((*sp)->IsCalleeSaveMethod()); 335 return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs); 336 } 337 338 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 339 DCHECK((*sp)->IsCalleeSaveMethod()); 340 uint8_t* previous_sp = 341 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; 342 return *reinterpret_cast<ArtMethod**>(previous_sp); 343 } 344 345 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 346 DCHECK((*sp)->IsCalleeSaveMethod()); 347 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, 348 CalleeSaveType::kSaveRefsAndArgs); 349 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 350 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 351 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 352 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 353 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 354 355 if (current_code->IsOptimized()) { 356 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 357 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 358 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding); 359 DCHECK(stack_map.IsValid()); 360 if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) { 361 InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); 362 return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 363 inline_info.GetDepth(encoding.inline_info.encoding)-1); 364 } else { 365 return stack_map.GetDexPc(encoding.stack_map.encoding); 366 } 367 } else { 368 return current_code->ToDexPc(*caller_sp, outer_pc); 369 } 370 } 371 372 static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index) 373 REQUIRES_SHARED(Locks::mutator_lock_) { 374 DCHECK((*sp)->IsCalleeSaveMethod()); 375 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, 376 CalleeSaveType::kSaveRefsAndArgs); 377 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 378 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 379 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 380 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 381 if (!current_code->IsOptimized()) { 382 return false; 383 } 384 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 385 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 386 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 387 MethodInfo method_info = current_code->GetOptimizedMethodInfo(); 388 InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding)); 389 if (invoke.IsValid()) { 390 *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding)); 391 *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info); 392 return true; 393 } 394 return false; 395 } 396 397 // For the given quick ref and args quick frame, return the caller's PC. 
398 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 399 DCHECK((*sp)->IsCalleeSaveMethod()); 400 uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; 401 return *reinterpret_cast<uintptr_t*>(lr); 402 } 403 404 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 405 uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) : 406 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), 407 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), 408 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), 409 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize 410 + sizeof(ArtMethod*)), // Skip ArtMethod*. 411 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), 412 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { 413 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), 414 "Number of Quick FPR arguments unexpected"); 415 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), 416 "Double alignment unexpected"); 417 // For register alignment, we want to assume that counters(fpr_double_index_) are even if the 418 // next register is even. 419 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, 420 "Number of Quick FPR arguments not even"); 421 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 422 } 423 424 virtual ~QuickArgumentVisitor() {} 425 426 virtual void Visit() = 0; 427 428 Primitive::Type GetParamPrimitiveType() const { 429 return cur_type_; 430 } 431 432 uint8_t* GetParamAddress() const { 433 if (!kQuickSoftFloatAbi) { 434 Primitive::Type type = GetParamPrimitiveType(); 435 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) { 436 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) { 437 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 438 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 439 } 440 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 441 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 442 } 443 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 444 } 445 } 446 if (gpr_index_ < kNumQuickGprArgs) { 447 return gpr_args_ + GprIndexToGprOffset(gpr_index_); 448 } 449 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 450 } 451 452 bool IsSplitLongOrDouble() const { 453 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || 454 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { 455 return is_split_long_or_double_; 456 } else { 457 return false; // An optimization for when GPR and FPRs are 64bit. 458 } 459 } 460 461 bool IsParamAReference() const { 462 return GetParamPrimitiveType() == Primitive::kPrimNot; 463 } 464 465 bool IsParamALongOrDouble() const { 466 Primitive::Type type = GetParamPrimitiveType(); 467 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; 468 } 469 470 uint64_t ReadSplitLongParam() const { 471 // The splitted long is always available through the stack. 
472 return *reinterpret_cast<uint64_t*>(stack_args_ 473 + stack_index_ * kBytesStackArgLocation); 474 } 475 476 void IncGprIndex() { 477 gpr_index_++; 478 if (kGprFprLockstep) { 479 fpr_index_++; 480 } 481 } 482 483 void IncFprIndex() { 484 fpr_index_++; 485 if (kGprFprLockstep) { 486 gpr_index_++; 487 } 488 } 489 490 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) { 491 // (a) 'stack_args_' should point to the first method's argument 492 // (b) whatever the argument type it is, the 'stack_index_' should 493 // be moved forward along with every visiting. 494 gpr_index_ = 0; 495 fpr_index_ = 0; 496 if (kQuickDoubleRegAlignedFloatBackFilled) { 497 fpr_double_index_ = 0; 498 } 499 stack_index_ = 0; 500 if (!is_static_) { // Handle this. 501 cur_type_ = Primitive::kPrimNot; 502 is_split_long_or_double_ = false; 503 Visit(); 504 stack_index_++; 505 if (kNumQuickGprArgs > 0) { 506 IncGprIndex(); 507 } 508 } 509 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) { 510 cur_type_ = Primitive::GetType(shorty_[shorty_index]); 511 switch (cur_type_) { 512 case Primitive::kPrimNot: 513 case Primitive::kPrimBoolean: 514 case Primitive::kPrimByte: 515 case Primitive::kPrimChar: 516 case Primitive::kPrimShort: 517 case Primitive::kPrimInt: 518 is_split_long_or_double_ = false; 519 Visit(); 520 stack_index_++; 521 if (gpr_index_ < kNumQuickGprArgs) { 522 IncGprIndex(); 523 } 524 break; 525 case Primitive::kPrimFloat: 526 is_split_long_or_double_ = false; 527 Visit(); 528 stack_index_++; 529 if (kQuickSoftFloatAbi) { 530 if (gpr_index_ < kNumQuickGprArgs) { 531 IncGprIndex(); 532 } 533 } else { 534 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 535 IncFprIndex(); 536 if (kQuickDoubleRegAlignedFloatBackFilled) { 537 // Double should not overlap with float. 538 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4. 539 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2)); 540 // Float should not overlap with double. 541 if (fpr_index_ % 2 == 0) { 542 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 543 } 544 } else if (kQuickSkipOddFpRegisters) { 545 IncFprIndex(); 546 } 547 } 548 } 549 break; 550 case Primitive::kPrimDouble: 551 case Primitive::kPrimLong: 552 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) { 553 if (cur_type_ == Primitive::kPrimLong && 554#if defined(__mips__) && !defined(__LP64__) 555 (gpr_index_ == 0 || gpr_index_ == 2) && 556#else 557 gpr_index_ == 0 && 558#endif 559 kAlignPairRegister) { 560 // Currently, this is only for ARM and MIPS, where we align long parameters with 561 // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using 562 // R2 (on ARM) or A2(T0) (on MIPS) instead. 563 IncGprIndex(); 564 } 565 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) && 566 ((gpr_index_ + 1) == kNumQuickGprArgs); 567 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) { 568 // We don't want to split this. Pass over this register. 
569 gpr_index_++; 570 is_split_long_or_double_ = false; 571 } 572 Visit(); 573 if (kBytesStackArgLocation == 4) { 574 stack_index_+= 2; 575 } else { 576 CHECK_EQ(kBytesStackArgLocation, 8U); 577 stack_index_++; 578 } 579 if (gpr_index_ < kNumQuickGprArgs) { 580 IncGprIndex(); 581 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) { 582 if (gpr_index_ < kNumQuickGprArgs) { 583 IncGprIndex(); 584 } 585 } 586 } 587 } else { 588 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) && 589 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled; 590 Visit(); 591 if (kBytesStackArgLocation == 4) { 592 stack_index_+= 2; 593 } else { 594 CHECK_EQ(kBytesStackArgLocation, 8U); 595 stack_index_++; 596 } 597 if (kQuickDoubleRegAlignedFloatBackFilled) { 598 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 599 fpr_double_index_ += 2; 600 // Float should not overlap with double. 601 if (fpr_index_ % 2 == 0) { 602 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 603 } 604 } 605 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 606 IncFprIndex(); 607 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) { 608 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 609 IncFprIndex(); 610 } 611 } 612 } 613 } 614 break; 615 default: 616 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_; 617 } 618 } 619 } 620 621 protected: 622 const bool is_static_; 623 const char* const shorty_; 624 const uint32_t shorty_len_; 625 626 private: 627 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame. 628 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame. 629 uint8_t* const stack_args_; // Address of stack arguments in caller's frame. 630 uint32_t gpr_index_; // Index into spilled GPRs. 631 // Index into spilled FPRs. 632 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_ 633 // holds a higher register number. 634 uint32_t fpr_index_; 635 // Index into spilled FPRs for aligned double. 636 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in 637 // terms of singles, may be behind fpr_index. 638 uint32_t fpr_double_index_; 639 uint32_t stack_index_; // Index into arguments on the stack. 640 // The current type of argument during VisitArguments. 641 Primitive::Type cur_type_; 642 // Does a 64bit parameter straddle the register and stack arguments? 643 bool is_split_long_or_double_; 644}; 645 646// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It 647// allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 648extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) 649 REQUIRES_SHARED(Locks::mutator_lock_) { 650 return QuickArgumentVisitor::GetProxyThisObject(sp); 651} 652 653// Visits arguments on the stack placing them into the shadow frame. 
654class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { 655 public: 656 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, 657 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : 658 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} 659 660 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 661 662 private: 663 ShadowFrame* const sf_; 664 uint32_t cur_reg_; 665 666 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor); 667}; 668 669void BuildQuickShadowFrameVisitor::Visit() { 670 Primitive::Type type = GetParamPrimitiveType(); 671 switch (type) { 672 case Primitive::kPrimLong: // Fall-through. 673 case Primitive::kPrimDouble: 674 if (IsSplitLongOrDouble()) { 675 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam()); 676 } else { 677 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress())); 678 } 679 ++cur_reg_; 680 break; 681 case Primitive::kPrimNot: { 682 StackReference<mirror::Object>* stack_ref = 683 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 684 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr()); 685 } 686 break; 687 case Primitive::kPrimBoolean: // Fall-through. 688 case Primitive::kPrimByte: // Fall-through. 689 case Primitive::kPrimChar: // Fall-through. 690 case Primitive::kPrimShort: // Fall-through. 691 case Primitive::kPrimInt: // Fall-through. 692 case Primitive::kPrimFloat: 693 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress())); 694 break; 695 case Primitive::kPrimVoid: 696 LOG(FATAL) << "UNREACHABLE"; 697 UNREACHABLE(); 698 } 699 ++cur_reg_; 700} 701 702// Don't inline. See b/65159206. 703NO_INLINE 704static void HandleDeoptimization(JValue* result, 705 ArtMethod* method, 706 ShadowFrame* deopt_frame, 707 ManagedStack* fragment) 708 REQUIRES_SHARED(Locks::mutator_lock_) { 709 // Coming from partial-fragment deopt. 710 Thread* self = Thread::Current(); 711 if (kIsDebugBuild) { 712 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom 713 // of the call-stack) corresponds to the called method. 714 ShadowFrame* linked = deopt_frame; 715 while (linked->GetLink() != nullptr) { 716 linked = linked->GetLink(); 717 } 718 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " " 719 << ArtMethod::PrettyMethod(linked->GetMethod()); 720 } 721 722 if (VLOG_IS_ON(deopt)) { 723 // Print out the stack to verify that it was a partial-fragment deopt. 724 LOG(INFO) << "Continue-ing from deopt. Stack is:"; 725 QuickExceptionHandler::DumpFramesWithType(self, true); 726 } 727 728 ObjPtr<mirror::Throwable> pending_exception; 729 bool from_code = false; 730 DeoptimizationMethodType method_type; 731 self->PopDeoptimizationContext(/* out */ result, 732 /* out */ &pending_exception, 733 /* out */ &from_code, 734 /* out */ &method_type); 735 736 // Push a transition back into managed code onto the linked list in thread. 737 self->PushManagedStackFragment(fragment); 738 739 // Ensure that the stack is still in order. 740 if (kIsDebugBuild) { 741 class DummyStackVisitor : public StackVisitor { 742 public: 743 explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_) 744 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} 745 746 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 747 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking 748 // logic. 
Just always say we want to continue. 749 return true; 750 } 751 }; 752 DummyStackVisitor dsv(self); 753 dsv.WalkStack(); 754 } 755 756 // Restore the exception that was pending before deoptimization then interpret the 757 // deoptimized frames. 758 if (pending_exception != nullptr) { 759 self->SetException(pending_exception); 760 } 761 interpreter::EnterInterpreterFromDeoptimize(self, 762 deopt_frame, 763 result, 764 from_code, 765 DeoptimizationMethodType::kDefault); 766} 767 768extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) 769 REQUIRES_SHARED(Locks::mutator_lock_) { 770 // Ensure we don't get thread suspension until the object arguments are safely in the shadow 771 // frame. 772 ScopedQuickEntrypointChecks sqec(self); 773 774 if (UNLIKELY(!method->IsInvokable())) { 775 method->ThrowInvocationTimeError(); 776 return 0; 777 } 778 779 JValue tmp_value; 780 ShadowFrame* deopt_frame = self->PopStackedShadowFrame( 781 StackedShadowFrameType::kDeoptimizationShadowFrame, false); 782 ManagedStack fragment; 783 784 DCHECK(!method->IsNative()) << method->PrettyMethod(); 785 uint32_t shorty_len = 0; 786 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 787 DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod(); 788 CodeItemDataAccessor accessor(non_proxy_method); 789 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 790 791 JValue result; 792 793 if (UNLIKELY(deopt_frame != nullptr)) { 794 HandleDeoptimization(&result, method, deopt_frame, &fragment); 795 } else { 796 const char* old_cause = self->StartAssertNoThreadSuspension( 797 "Building interpreter shadow frame"); 798 uint16_t num_regs = accessor.RegistersSize(); 799 // No last shadow coming from quick. 800 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 801 CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0); 802 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 803 size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize(); 804 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, 805 shadow_frame, first_arg_reg); 806 shadow_frame_builder.VisitArguments(); 807 const bool needs_initialization = 808 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); 809 // Push a transition back into managed code onto the linked list in thread. 810 self->PushManagedStackFragment(&fragment); 811 self->PushShadowFrame(shadow_frame); 812 self->EndAssertNoThreadSuspension(old_cause); 813 814 if (needs_initialization) { 815 // Ensure static method's class is initialized. 816 StackHandleScope<1> hs(self); 817 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass())); 818 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { 819 DCHECK(Thread::Current()->IsExceptionPending()) 820 << shadow_frame->GetMethod()->PrettyMethod(); 821 self->PopManagedStackFragment(fragment); 822 return 0; 823 } 824 } 825 826 result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame); 827 } 828 829 // Pop transition. 
830 self->PopManagedStackFragment(fragment); 831 832 // Request a stack deoptimization if needed 833 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 834 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); 835 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization 836 // should be done and it knows the real return pc. 837 if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) && 838 Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) { 839 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { 840 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 841 << caller->PrettyMethod(); 842 } else { 843 // Push the context of the deoptimization stack so we can restore the return value and the 844 // exception before executing the deoptimized frames. 845 self->PushDeoptimizationContext( 846 result, 847 shorty[0] == 'L' || shorty[0] == '[', /* class or array */ 848 self->GetException(), 849 false /* from_code */, 850 DeoptimizationMethodType::kDefault); 851 852 // Set special exception to cause deoptimization. 853 self->SetException(Thread::GetDeoptimizationException()); 854 } 855 } 856 857 // No need to restore the args since the method has already been run by the interpreter. 858 return result.GetJ(); 859} 860 861// Visits arguments on the stack placing them into the args vector, Object* arguments are converted 862// to jobjects. 863class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { 864 public: 865 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, 866 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : 867 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} 868 869 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 870 871 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 872 873 private: 874 ScopedObjectAccessUnchecked* const soa_; 875 std::vector<jvalue>* const args_; 876 // References which we must update when exiting in case the GC moved the objects. 877 std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_; 878 879 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); 880}; 881 882void BuildQuickArgumentVisitor::Visit() { 883 jvalue val; 884 Primitive::Type type = GetParamPrimitiveType(); 885 switch (type) { 886 case Primitive::kPrimNot: { 887 StackReference<mirror::Object>* stack_ref = 888 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 889 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 890 references_.push_back(std::make_pair(val.l, stack_ref)); 891 break; 892 } 893 case Primitive::kPrimLong: // Fall-through. 894 case Primitive::kPrimDouble: 895 if (IsSplitLongOrDouble()) { 896 val.j = ReadSplitLongParam(); 897 } else { 898 val.j = *reinterpret_cast<jlong*>(GetParamAddress()); 899 } 900 break; 901 case Primitive::kPrimBoolean: // Fall-through. 902 case Primitive::kPrimByte: // Fall-through. 903 case Primitive::kPrimChar: // Fall-through. 904 case Primitive::kPrimShort: // Fall-through. 905 case Primitive::kPrimInt: // Fall-through. 906 case Primitive::kPrimFloat: 907 val.i = *reinterpret_cast<jint*>(GetParamAddress()); 908 break; 909 case Primitive::kPrimVoid: 910 LOG(FATAL) << "UNREACHABLE"; 911 UNREACHABLE(); 912 } 913 args_->push_back(val); 914} 915 916void BuildQuickArgumentVisitor::FixupReferences() { 917 // Fixup any references which may have changed. 
918 for (const auto& pair : references_) { 919 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 920 soa_->Env()->DeleteLocalRef(pair.first); 921 } 922} 923// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method 924// which is responsible for recording callee save registers. We explicitly place into jobjects the 925// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a 926// field within the proxy object, which will box the primitive arguments and deal with error cases. 927extern "C" uint64_t artQuickProxyInvokeHandler( 928 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) 929 REQUIRES_SHARED(Locks::mutator_lock_) { 930 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod(); 931 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod(); 932 // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 933 const char* old_cause = 934 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); 935 // Register the top of the managed stack, making stack crawlable. 936 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod(); 937 self->VerifyStack(); 938 // Start new JNI local reference state. 939 JNIEnvExt* env = self->GetJniEnv(); 940 ScopedObjectAccessUnchecked soa(env); 941 ScopedJniEnvLocalRefState env_state(env); 942 // Create local ref. copies of proxy method and the receiver. 943 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); 944 945 // Placing arguments into args vector and remove the receiver. 946 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 947 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " " 948 << non_proxy_method->PrettyMethod(); 949 std::vector<jvalue> args; 950 uint32_t shorty_len = 0; 951 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 952 BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); 953 954 local_ref_visitor.VisitArguments(); 955 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod(); 956 args.erase(args.begin()); 957 958 // Convert proxy method into expected interface method. 959 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize); 960 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod(); 961 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod(); 962 self->EndAssertNoThreadSuspension(old_cause); 963 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 964 DCHECK(!Runtime::Current()->IsActiveTransaction()); 965 ObjPtr<mirror::Method> interface_reflect_method = 966 mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), interface_method); 967 if (interface_reflect_method == nullptr) { 968 soa.Self()->AssertPendingOOMException(); 969 return 0; 970 } 971 jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_reflect_method); 972 973 // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code 974 // that performs allocations. 975 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); 976 // Restore references which might have moved. 
977 local_ref_visitor.FixupReferences(); 978 return result.GetJ(); 979} 980 981// Read object references held in arguments from quick frames and place in a JNI local references, 982// so they don't get garbage collected. 983class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { 984 public: 985 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 986 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : 987 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} 988 989 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 990 991 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 992 993 private: 994 ScopedObjectAccessUnchecked* const soa_; 995 // References which we must update when exiting in case the GC moved the objects. 996 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_; 997 998 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor); 999}; 1000 1001void RememberForGcArgumentVisitor::Visit() { 1002 if (IsParamAReference()) { 1003 StackReference<mirror::Object>* stack_ref = 1004 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 1005 jobject reference = 1006 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 1007 references_.push_back(std::make_pair(reference, stack_ref)); 1008 } 1009} 1010 1011void RememberForGcArgumentVisitor::FixupReferences() { 1012 // Fixup any references which may have changed. 1013 for (const auto& pair : references_) { 1014 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 1015 soa_->Env()->DeleteLocalRef(pair.first); 1016 } 1017} 1018 1019extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, 1020 mirror::Object* this_object, 1021 Thread* self, 1022 ArtMethod** sp) 1023 REQUIRES_SHARED(Locks::mutator_lock_) { 1024 const void* result; 1025 // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip 1026 // that part. 1027 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1028 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1029 if (instrumentation->IsDeoptimized(method)) { 1030 result = GetQuickToInterpreterBridge(); 1031 } else { 1032 result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize); 1033 DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result)); 1034 } 1035 1036 bool interpreter_entry = (result == GetQuickToInterpreterBridge()); 1037 bool is_static = method->IsStatic(); 1038 uint32_t shorty_len; 1039 const char* shorty = 1040 method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len); 1041 1042 ScopedObjectAccessUnchecked soa(self); 1043 RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa); 1044 visitor.VisitArguments(); 1045 1046 instrumentation->PushInstrumentationStackFrame(self, 1047 is_static ? 
nullptr : this_object, 1048 method, 1049 QuickArgumentVisitor::GetCallingPc(sp), 1050 interpreter_entry); 1051 1052 visitor.FixupReferences(); 1053 if (UNLIKELY(self->IsExceptionPending())) { 1054 return nullptr; 1055 } 1056 CHECK(result != nullptr) << method->PrettyMethod(); 1057 return result; 1058} 1059 1060extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, 1061 ArtMethod** sp, 1062 uint64_t* gpr_result, 1063 uint64_t* fpr_result) 1064 REQUIRES_SHARED(Locks::mutator_lock_) { 1065 DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current())); 1066 CHECK(gpr_result != nullptr); 1067 CHECK(fpr_result != nullptr); 1068 // Instrumentation exit stub must not be entered with a pending exception. 1069 CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception " 1070 << self->GetException()->Dump(); 1071 // Compute address of return PC and sanity check that it currently holds 0. 1072 size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, 1073 CalleeSaveType::kSaveEverything); 1074 uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + 1075 return_pc_offset); 1076 CHECK_EQ(*return_pc, 0U); 1077 1078 // Pop the frame filling in the return pc. The low half of the return value is 0 when 1079 // deoptimization shouldn't be performed with the high-half having the return address. When 1080 // deoptimization should be performed the low half is zero and the high-half the address of the 1081 // deoptimization entry point. 1082 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1083 TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame( 1084 self, return_pc, gpr_result, fpr_result); 1085 if (self->IsExceptionPending()) { 1086 return GetTwoWordFailureValue(); 1087 } 1088 return return_or_deoptimize_pc; 1089} 1090 1091// Lazily resolve a method for quick. Called by stub code. 1092extern "C" const void* artQuickResolutionTrampoline( 1093 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) 1094 REQUIRES_SHARED(Locks::mutator_lock_) { 1095 // The resolution trampoline stashes the resolved method into the callee-save frame to transport 1096 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely 1097 // does not have the same stack layout as the callee-save method). 
1098 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1099 // Start new JNI local reference state 1100 JNIEnvExt* env = self->GetJniEnv(); 1101 ScopedObjectAccessUnchecked soa(env); 1102 ScopedJniEnvLocalRefState env_state(env); 1103 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 1104 1105 // Compute details about the called method (avoid GCs) 1106 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 1107 InvokeType invoke_type; 1108 MethodReference called_method(nullptr, 0); 1109 const bool called_method_known_on_entry = !called->IsRuntimeMethod(); 1110 ArtMethod* caller = nullptr; 1111 if (!called_method_known_on_entry) { 1112 caller = QuickArgumentVisitor::GetCallingMethod(sp); 1113 called_method.dex_file = caller->GetDexFile(); 1114 1115 InvokeType stack_map_invoke_type; 1116 uint32_t stack_map_dex_method_idx; 1117 const bool found_stack_map = QuickArgumentVisitor::GetInvokeType(sp, 1118 &stack_map_invoke_type, 1119 &stack_map_dex_method_idx); 1120 // For debug builds, we make sure both of the paths are consistent by also looking at the dex 1121 // code. 1122 if (!found_stack_map || kIsDebugBuild) { 1123 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 1124 CodeItemInstructionAccessor accessor(caller); 1125 CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits()); 1126 const Instruction& instr = accessor.InstructionAt(dex_pc); 1127 Instruction::Code instr_code = instr.Opcode(); 1128 bool is_range; 1129 switch (instr_code) { 1130 case Instruction::INVOKE_DIRECT: 1131 invoke_type = kDirect; 1132 is_range = false; 1133 break; 1134 case Instruction::INVOKE_DIRECT_RANGE: 1135 invoke_type = kDirect; 1136 is_range = true; 1137 break; 1138 case Instruction::INVOKE_STATIC: 1139 invoke_type = kStatic; 1140 is_range = false; 1141 break; 1142 case Instruction::INVOKE_STATIC_RANGE: 1143 invoke_type = kStatic; 1144 is_range = true; 1145 break; 1146 case Instruction::INVOKE_SUPER: 1147 invoke_type = kSuper; 1148 is_range = false; 1149 break; 1150 case Instruction::INVOKE_SUPER_RANGE: 1151 invoke_type = kSuper; 1152 is_range = true; 1153 break; 1154 case Instruction::INVOKE_VIRTUAL: 1155 invoke_type = kVirtual; 1156 is_range = false; 1157 break; 1158 case Instruction::INVOKE_VIRTUAL_RANGE: 1159 invoke_type = kVirtual; 1160 is_range = true; 1161 break; 1162 case Instruction::INVOKE_INTERFACE: 1163 invoke_type = kInterface; 1164 is_range = false; 1165 break; 1166 case Instruction::INVOKE_INTERFACE_RANGE: 1167 invoke_type = kInterface; 1168 is_range = true; 1169 break; 1170 default: 1171 LOG(FATAL) << "Unexpected call into trampoline: " << instr.DumpString(nullptr); 1172 UNREACHABLE(); 1173 } 1174 called_method.index = (is_range) ? instr.VRegB_3rc() : instr.VRegB_35c(); 1175 // Check that the invoke matches what we expected, note that this path only happens for debug 1176 // builds. 1177 if (found_stack_map) { 1178 DCHECK_EQ(stack_map_invoke_type, invoke_type); 1179 if (invoke_type != kSuper) { 1180 // Super may be sharpened. 
1181 DCHECK_EQ(stack_map_dex_method_idx, called_method.index) 1182 << called_method.dex_file->PrettyMethod(stack_map_dex_method_idx) << " " 1183 << called_method.PrettyMethod(); 1184 } 1185 } else { 1186 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " " 1187 << called_method.index; 1188 } 1189 } else { 1190 invoke_type = stack_map_invoke_type; 1191 called_method.index = stack_map_dex_method_idx; 1192 } 1193 } else { 1194 invoke_type = kStatic; 1195 called_method.dex_file = called->GetDexFile(); 1196 called_method.index = called->GetDexMethodIndex(); 1197 } 1198 uint32_t shorty_len; 1199 const char* shorty = 1200 called_method.dex_file->GetMethodShorty(called_method.GetMethodId(), &shorty_len); 1201 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 1202 visitor.VisitArguments(); 1203 self->EndAssertNoThreadSuspension(old_cause); 1204 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 1205 // Resolve method filling in dex cache. 1206 if (!called_method_known_on_entry) { 1207 StackHandleScope<1> hs(self); 1208 mirror::Object* dummy = nullptr; 1209 HandleWrapper<mirror::Object> h_receiver( 1210 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy)); 1211 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1212 called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( 1213 self, called_method.index, caller, invoke_type); 1214 1215 // Update .bss entry in oat file if any. 1216 if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) { 1217 size_t bss_offset = IndexBssMappingLookup::GetBssOffset( 1218 called_method.dex_file->GetOatDexFile()->GetMethodBssMapping(), 1219 called_method.index, 1220 called_method.dex_file->NumMethodIds(), 1221 static_cast<size_t>(kRuntimePointerSize)); 1222 if (bss_offset != IndexBssMappingLookup::npos) { 1223 DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize)); 1224 const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile(); 1225 ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>( 1226 oat_file->BssBegin() + bss_offset)); 1227 DCHECK_GE(method_entry, oat_file->GetBssMethods().data()); 1228 DCHECK_LT(method_entry, 1229 oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size()); 1230 *method_entry = called; 1231 } 1232 } 1233 } 1234 const void* code = nullptr; 1235 if (LIKELY(!self->IsExceptionPending())) { 1236 // Incompatible class change should have been handled in resolve method. 1237 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 1238 << called->PrettyMethod() << " " << invoke_type; 1239 if (virtual_or_interface || invoke_type == kSuper) { 1240 // Refine called method based on receiver for kVirtual/kInterface, and 1241 // caller for kSuper. 
1242 ArtMethod* orig_called = called; 1243 if (invoke_type == kVirtual) { 1244 CHECK(receiver != nullptr) << invoke_type; 1245 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize); 1246 } else if (invoke_type == kInterface) { 1247 CHECK(receiver != nullptr) << invoke_type; 1248 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize); 1249 } else { 1250 DCHECK_EQ(invoke_type, kSuper); 1251 CHECK(caller != nullptr) << invoke_type; 1252 ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType( 1253 caller->GetDexFile()->GetMethodId(called_method.index).class_idx_, caller); 1254 if (ref_class->IsInterface()) { 1255 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize); 1256 } else { 1257 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( 1258 called->GetMethodIndex(), kRuntimePointerSize); 1259 } 1260 } 1261 1262 CHECK(called != nullptr) << orig_called->PrettyMethod() << " " 1263 << mirror::Object::PrettyTypeOf(receiver) << " " 1264 << invoke_type << " " << orig_called->GetVtableIndex(); 1265 } 1266 1267 // Ensure that the called method's class is initialized. 1268 StackHandleScope<1> hs(soa.Self()); 1269 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 1270 linker->EnsureInitialized(soa.Self(), called_class, true, true); 1271 if (LIKELY(called_class->IsInitialized())) { 1272 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1273 // If we are single-stepping or the called method is deoptimized (by a 1274 // breakpoint, for example), then we have to execute the called method 1275 // with the interpreter. 1276 code = GetQuickToInterpreterBridge(); 1277 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) { 1278 // If the caller is deoptimized (by a breakpoint, for example), we have to 1279 // continue its execution with interpreter when returning from the called 1280 // method. Because we do not want to execute the called method with the 1281 // interpreter, we wrap its execution into the instrumentation stubs. 1282 // When the called method returns, it will execute the instrumentation 1283 // exit hook that will determine the need of the interpreter with a call 1284 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if 1285 // it is needed. 1286 code = GetQuickInstrumentationEntryPoint(); 1287 } else { 1288 code = called->GetEntryPointFromQuickCompiledCode(); 1289 } 1290 } else if (called_class->IsInitializing()) { 1291 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1292 // If we are single-stepping or the called method is deoptimized (by a 1293 // breakpoint, for example), then we have to execute the called method 1294 // with the interpreter. 1295 code = GetQuickToInterpreterBridge(); 1296 } else if (invoke_type == kStatic) { 1297 // Class is still initializing. The entrypoint contains the trampoline, so we cannot return 1298 // it. Instead, ask the class linker what is the actual code that needs to be invoked. 1299 code = linker->GetQuickEntrypointFor(called); 1300 } else { 1301 // No trampoline for non-static methods. 1302 code = called->GetEntryPointFromQuickCompiledCode(); 1303 } 1304 } else { 1305 DCHECK(called_class->IsErroneous()); 1306 } 1307 } 1308 CHECK_EQ(code == nullptr, self->IsExceptionPending()); 1309 // Fixup any locally saved objects may have moved during a GC. 
1310 visitor.FixupReferences(); 1311 // Place called method in callee-save frame to be placed as first argument to quick method. 1312 *sp = called; 1313 1314 return code; 1315} 1316 1317/* 1318 * This class uses a couple of observations to unite the different calling conventions through 1319 * a few constants. 1320 * 1321 * 1) Number of registers used for passing is normally even, so counting down has no penalty for 1322 * possible alignment. 1323 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point 1324 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote 1325 * when we have to split things 1326 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats 1327 * and we can use Int handling directly. 1328 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code 1329 * necessary when widening. Also, widening of Ints will take place implicitly, and the 1330 * extension should be compatible with Aarch64, which mandates copying the available bits 1331 * into LSB and leaving the rest unspecified. 1332 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on 1333 * the stack. 1334 * 6) There is only little endian. 1335 * 1336 * 1337 * Actual work is supposed to be done in a delegate of the template type. The interface is as 1338 * follows: 1339 * 1340 * void PushGpr(uintptr_t): Add a value for the next GPR 1341 * 1342 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need 1343 * padding, that is, think the architecture is 32b and aligns 64b. 1344 * 1345 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to 1346 * split this if necessary. The current state will have aligned, if 1347 * necessary. 1348 * 1349 * void PushStack(uintptr_t): Push a value to the stack. 1350 * 1351 * uintptr_t PushHandleScope(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr, 1352 * as this might be important for null initialization. 1353 * Must return the jobject, that is, the reference to the 1354 * entry in the HandleScope (nullptr if necessary). 1355 * 1356 */ 1357template<class T> class BuildNativeCallFrameStateMachine { 1358 public: 1359#if defined(__arm__) 1360 // TODO: These are all dummy values! 1361 static constexpr bool kNativeSoftFloatAbi = true; 1362 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3 1363 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1364 1365 static constexpr size_t kRegistersNeededForLong = 2; 1366 static constexpr size_t kRegistersNeededForDouble = 2; 1367 static constexpr bool kMultiRegistersAligned = true; 1368 static constexpr bool kMultiFPRegistersWidened = false; 1369 static constexpr bool kMultiGPRegistersWidened = false; 1370 static constexpr bool kAlignLongOnStack = true; 1371 static constexpr bool kAlignDoubleOnStack = true; 1372#elif defined(__aarch64__) 1373 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 1374 static constexpr size_t kNumNativeGprArgs = 8; // 6 arguments passed in GPRs. 1375 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs. 
1376
1377  static constexpr size_t kRegistersNeededForLong = 1;
1378  static constexpr size_t kRegistersNeededForDouble = 1;
1379  static constexpr bool kMultiRegistersAligned = false;
1380  static constexpr bool kMultiFPRegistersWidened = false;
1381  static constexpr bool kMultiGPRegistersWidened = false;
1382  static constexpr bool kAlignLongOnStack = false;
1383  static constexpr bool kAlignDoubleOnStack = false;
1384#elif defined(__mips__) && !defined(__LP64__)
1385  static constexpr bool kNativeSoftFloatAbi = true;  // o32 is hard-float, but args are laid out soft-float style here; the assembly stub loads the leading FP args (see the MIPS32 note in artQuickGenericJniTrampoline).
1386  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
1387  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
1388
1389  static constexpr size_t kRegistersNeededForLong = 2;
1390  static constexpr size_t kRegistersNeededForDouble = 2;
1391  static constexpr bool kMultiRegistersAligned = true;
1392  static constexpr bool kMultiFPRegistersWidened = true;
1393  static constexpr bool kMultiGPRegistersWidened = false;
1394  static constexpr bool kAlignLongOnStack = true;
1395  static constexpr bool kAlignDoubleOnStack = true;
1396#elif defined(__mips__) && defined(__LP64__)
1397  // Let the code prepare GPRs only; the FPRs will be loaded with the same data.
1398  static constexpr bool kNativeSoftFloatAbi = true;
1399  static constexpr size_t kNumNativeGprArgs = 8;
1400  static constexpr size_t kNumNativeFprArgs = 0;
1401
1402  static constexpr size_t kRegistersNeededForLong = 1;
1403  static constexpr size_t kRegistersNeededForDouble = 1;
1404  static constexpr bool kMultiRegistersAligned = false;
1405  static constexpr bool kMultiFPRegistersWidened = false;
1406  static constexpr bool kMultiGPRegistersWidened = true;
1407  static constexpr bool kAlignLongOnStack = false;
1408  static constexpr bool kAlignDoubleOnStack = false;
1409#elif defined(__i386__)
1410  // TODO: Check these!
1411  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
1412  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs; everything goes on the stack.
1413  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
1414
1415  static constexpr size_t kRegistersNeededForLong = 2;
1416  static constexpr size_t kRegistersNeededForDouble = 2;
1417  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
1418  static constexpr bool kMultiFPRegistersWidened = false;
1419  static constexpr bool kMultiGPRegistersWidened = false;
1420  static constexpr bool kAlignLongOnStack = false;
1421  static constexpr bool kAlignDoubleOnStack = false;
1422#elif defined(__x86_64__)
1423  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
1424  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
1425  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
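  // Per the System V AMD64 ABI, the first 6 integer/pointer arguments travel in rdi, rsi, rdx,
  // rcx, r8, r9 and the first 8 floating-point arguments in xmm0-xmm7. As on arm64, 64-bit
  // values fit into a single register, so the constants below need neither splitting nor padding.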
1426 1427 static constexpr size_t kRegistersNeededForLong = 1; 1428 static constexpr size_t kRegistersNeededForDouble = 1; 1429 static constexpr bool kMultiRegistersAligned = false; 1430 static constexpr bool kMultiFPRegistersWidened = false; 1431 static constexpr bool kMultiGPRegistersWidened = false; 1432 static constexpr bool kAlignLongOnStack = false; 1433 static constexpr bool kAlignDoubleOnStack = false; 1434#else 1435#error "Unsupported architecture" 1436#endif 1437 1438 public: 1439 explicit BuildNativeCallFrameStateMachine(T* delegate) 1440 : gpr_index_(kNumNativeGprArgs), 1441 fpr_index_(kNumNativeFprArgs), 1442 stack_entries_(0), 1443 delegate_(delegate) { 1444 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff 1445 // the next register is even; counting down is just to make the compiler happy... 1446 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); 1447 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); 1448 } 1449 1450 virtual ~BuildNativeCallFrameStateMachine() {} 1451 1452 bool HavePointerGpr() const { 1453 return gpr_index_ > 0; 1454 } 1455 1456 void AdvancePointer(const void* val) { 1457 if (HavePointerGpr()) { 1458 gpr_index_--; 1459 PushGpr(reinterpret_cast<uintptr_t>(val)); 1460 } else { 1461 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b 1462 PushStack(reinterpret_cast<uintptr_t>(val)); 1463 gpr_index_ = 0; 1464 } 1465 } 1466 1467 bool HaveHandleScopeGpr() const { 1468 return gpr_index_ > 0; 1469 } 1470 1471 void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) { 1472 uintptr_t handle = PushHandle(ptr); 1473 if (HaveHandleScopeGpr()) { 1474 gpr_index_--; 1475 PushGpr(handle); 1476 } else { 1477 stack_entries_++; 1478 PushStack(handle); 1479 gpr_index_ = 0; 1480 } 1481 } 1482 1483 bool HaveIntGpr() const { 1484 return gpr_index_ > 0; 1485 } 1486 1487 void AdvanceInt(uint32_t val) { 1488 if (HaveIntGpr()) { 1489 gpr_index_--; 1490 if (kMultiGPRegistersWidened) { 1491 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1492 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1493 } else { 1494 PushGpr(val); 1495 } 1496 } else { 1497 stack_entries_++; 1498 if (kMultiGPRegistersWidened) { 1499 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1500 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1501 } else { 1502 PushStack(val); 1503 } 1504 gpr_index_ = 0; 1505 } 1506 } 1507 1508 bool HaveLongGpr() const { 1509 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 
1 : 0); 1510 } 1511 1512 bool LongGprNeedsPadding() const { 1513 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1514 kAlignLongOnStack && // and when it needs alignment 1515 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1516 } 1517 1518 bool LongStackNeedsPadding() const { 1519 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1520 kAlignLongOnStack && // and when it needs 8B alignment 1521 (stack_entries_ & 1) == 1; // counter is odd 1522 } 1523 1524 void AdvanceLong(uint64_t val) { 1525 if (HaveLongGpr()) { 1526 if (LongGprNeedsPadding()) { 1527 PushGpr(0); 1528 gpr_index_--; 1529 } 1530 if (kRegistersNeededForLong == 1) { 1531 PushGpr(static_cast<uintptr_t>(val)); 1532 } else { 1533 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1534 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1535 } 1536 gpr_index_ -= kRegistersNeededForLong; 1537 } else { 1538 if (LongStackNeedsPadding()) { 1539 PushStack(0); 1540 stack_entries_++; 1541 } 1542 if (kRegistersNeededForLong == 1) { 1543 PushStack(static_cast<uintptr_t>(val)); 1544 stack_entries_++; 1545 } else { 1546 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1547 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1548 stack_entries_ += 2; 1549 } 1550 gpr_index_ = 0; 1551 } 1552 } 1553 1554 bool HaveFloatFpr() const { 1555 return fpr_index_ > 0; 1556 } 1557 1558 void AdvanceFloat(float val) { 1559 if (kNativeSoftFloatAbi) { 1560 AdvanceInt(bit_cast<uint32_t, float>(val)); 1561 } else { 1562 if (HaveFloatFpr()) { 1563 fpr_index_--; 1564 if (kRegistersNeededForDouble == 1) { 1565 if (kMultiFPRegistersWidened) { 1566 PushFpr8(bit_cast<uint64_t, double>(val)); 1567 } else { 1568 // No widening, just use the bits. 1569 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1570 } 1571 } else { 1572 PushFpr4(val); 1573 } 1574 } else { 1575 stack_entries_++; 1576 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1577 // Need to widen before storing: Note the "double" in the template instantiation. 1578 // Note: We need to jump through those hoops to make the compiler happy. 1579 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1580 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1581 } else { 1582 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1583 } 1584 fpr_index_ = 0; 1585 } 1586 } 1587 } 1588 1589 bool HaveDoubleFpr() const { 1590 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 
1 : 0); 1591 } 1592 1593 bool DoubleFprNeedsPadding() const { 1594 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1595 kAlignDoubleOnStack && // and when it needs alignment 1596 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1597 } 1598 1599 bool DoubleStackNeedsPadding() const { 1600 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1601 kAlignDoubleOnStack && // and when it needs 8B alignment 1602 (stack_entries_ & 1) == 1; // counter is odd 1603 } 1604 1605 void AdvanceDouble(uint64_t val) { 1606 if (kNativeSoftFloatAbi) { 1607 AdvanceLong(val); 1608 } else { 1609 if (HaveDoubleFpr()) { 1610 if (DoubleFprNeedsPadding()) { 1611 PushFpr4(0); 1612 fpr_index_--; 1613 } 1614 PushFpr8(val); 1615 fpr_index_ -= kRegistersNeededForDouble; 1616 } else { 1617 if (DoubleStackNeedsPadding()) { 1618 PushStack(0); 1619 stack_entries_++; 1620 } 1621 if (kRegistersNeededForDouble == 1) { 1622 PushStack(static_cast<uintptr_t>(val)); 1623 stack_entries_++; 1624 } else { 1625 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1626 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1627 stack_entries_ += 2; 1628 } 1629 fpr_index_ = 0; 1630 } 1631 } 1632 } 1633 1634 uint32_t GetStackEntries() const { 1635 return stack_entries_; 1636 } 1637 1638 uint32_t GetNumberOfUsedGprs() const { 1639 return kNumNativeGprArgs - gpr_index_; 1640 } 1641 1642 uint32_t GetNumberOfUsedFprs() const { 1643 return kNumNativeFprArgs - fpr_index_; 1644 } 1645 1646 private: 1647 void PushGpr(uintptr_t val) { 1648 delegate_->PushGpr(val); 1649 } 1650 void PushFpr4(float val) { 1651 delegate_->PushFpr4(val); 1652 } 1653 void PushFpr8(uint64_t val) { 1654 delegate_->PushFpr8(val); 1655 } 1656 void PushStack(uintptr_t val) { 1657 delegate_->PushStack(val); 1658 } 1659 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { 1660 return delegate_->PushHandle(ref); 1661 } 1662 1663 uint32_t gpr_index_; // Number of free GPRs 1664 uint32_t fpr_index_; // Number of free FPRs 1665 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1666 // extended 1667 T* const delegate_; // What Push implementation gets called 1668}; 1669 1670// Computes the sizes of register stacks and call stack area. Handling of references can be extended 1671// in subclasses. 1672// 1673// To handle native pointers, use "L" in the shorty for an object reference, which simulates 1674// them with handles. 1675class ComputeNativeCallFrameSize { 1676 public: 1677 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1678 1679 virtual ~ComputeNativeCallFrameSize() {} 1680 1681 uint32_t GetStackSize() const { 1682 return num_stack_entries_ * sizeof(uintptr_t); 1683 } 1684 1685 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1686 sp8 -= GetStackSize(); 1687 // Align by kStackAlignment. 
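    // For example, with the usual 16-byte kStackAlignment, an sp8 ending in 0x6c is rounded down
    // to end in 0x60; the skipped bytes simply become padding below the out-args area.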
1688 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1689 return sp8; 1690 } 1691 1692 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) 1693 const { 1694 // Assumption is OK right now, as we have soft-float arm 1695 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs; 1696 sp8 -= fregs * sizeof(uintptr_t); 1697 *start_fpr = reinterpret_cast<uint32_t*>(sp8); 1698 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs; 1699 sp8 -= iregs * sizeof(uintptr_t); 1700 *start_gpr = reinterpret_cast<uintptr_t*>(sp8); 1701 return sp8; 1702 } 1703 1704 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr, 1705 uint32_t** start_fpr) const { 1706 // Native call stack. 1707 sp8 = LayoutCallStack(sp8); 1708 *start_stack = reinterpret_cast<uintptr_t*>(sp8); 1709 1710 // Put fprs and gprs below. 1711 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr); 1712 1713 // Return the new bottom. 1714 return sp8; 1715 } 1716 1717 virtual void WalkHeader( 1718 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED) 1719 REQUIRES_SHARED(Locks::mutator_lock_) { 1720 } 1721 1722 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) { 1723 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); 1724 1725 WalkHeader(&sm); 1726 1727 for (uint32_t i = 1; i < shorty_len; ++i) { 1728 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]); 1729 switch (cur_type_) { 1730 case Primitive::kPrimNot: 1731 // TODO: fix abuse of mirror types. 1732 sm.AdvanceHandleScope( 1733 reinterpret_cast<mirror::Object*>(0x12345678)); 1734 break; 1735 1736 case Primitive::kPrimBoolean: 1737 case Primitive::kPrimByte: 1738 case Primitive::kPrimChar: 1739 case Primitive::kPrimShort: 1740 case Primitive::kPrimInt: 1741 sm.AdvanceInt(0); 1742 break; 1743 case Primitive::kPrimFloat: 1744 sm.AdvanceFloat(0); 1745 break; 1746 case Primitive::kPrimDouble: 1747 sm.AdvanceDouble(0); 1748 break; 1749 case Primitive::kPrimLong: 1750 sm.AdvanceLong(0); 1751 break; 1752 default: 1753 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty; 1754 UNREACHABLE(); 1755 } 1756 } 1757 1758 num_stack_entries_ = sm.GetStackEntries(); 1759 } 1760 1761 void PushGpr(uintptr_t /* val */) { 1762 // not optimizing registers, yet 1763 } 1764 1765 void PushFpr4(float /* val */) { 1766 // not optimizing registers, yet 1767 } 1768 1769 void PushFpr8(uint64_t /* val */) { 1770 // not optimizing registers, yet 1771 } 1772 1773 void PushStack(uintptr_t /* val */) { 1774 // counting is already done in the superclass 1775 } 1776 1777 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) { 1778 return reinterpret_cast<uintptr_t>(nullptr); 1779 } 1780 1781 protected: 1782 uint32_t num_stack_entries_; 1783}; 1784 1785class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { 1786 public: 1787 explicit ComputeGenericJniFrameSize(bool critical_native) 1788 : num_handle_scope_references_(0), critical_native_(critical_native) {} 1789 1790 // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs 1791 // is at *m = sp. Will update to point to the bottom of the save frame. 1792 // 1793 // Note: assumes ComputeAll() has been run before. 
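  // Roughly: on entry the bottom of the callee-save frame is  | callee saves ... | Method* | <- sp,
  // and on exit it is  | callee saves ... | HandleScope | ArtMethod* | <- *m  (addresses decrease
  // to the right). The old Method* slot becomes the top of the HandleScope, and a new,
  // kStackAlignment-aligned ArtMethod* slot is created below it; this is a sketch, exact padding
  // depends on the alignment.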
1794 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1795 REQUIRES_SHARED(Locks::mutator_lock_) { 1796 ArtMethod* method = **m; 1797 1798 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 1799 1800 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1801 1802 // First, fix up the layout of the callee-save frame. 1803 // We have to squeeze in the HandleScope, and relocate the method pointer. 1804 1805 // "Free" the slot for the method. 1806 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 1807 1808 // Under the callee saves put handle scope and new method stack reference. 1809 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 1810 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 1811 1812 sp8 -= scope_and_method; 1813 // Align by kStackAlignment. 1814 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1815 1816 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 1817 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 1818 num_handle_scope_references_); 1819 1820 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 1821 uint8_t* method_pointer = sp8; 1822 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 1823 *new_method_ref = method; 1824 *m = new_method_ref; 1825 } 1826 1827 // Adds space for the cookie. Note: may leave stack unaligned. 1828 void LayoutCookie(uint8_t** sp) const { 1829 // Reference cookie and padding 1830 *sp -= 8; 1831 } 1832 1833 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 1834 // Returns the new bottom. Note: this may be unaligned. 1835 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1836 REQUIRES_SHARED(Locks::mutator_lock_) { 1837 // First, fix up the layout of the callee-save frame. 1838 // We have to squeeze in the HandleScope, and relocate the method pointer. 1839 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 1840 1841 // The bottom of the callee-save frame is now where the method is, *m. 1842 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 1843 1844 // Add space for cookie. 1845 LayoutCookie(&sp8); 1846 1847 return sp8; 1848 } 1849 1850 // WARNING: After this, *sp won't be pointing to the method anymore! 1851 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 1852 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 1853 uint32_t** start_fpr) 1854 REQUIRES_SHARED(Locks::mutator_lock_) { 1855 Walk(shorty, shorty_len); 1856 1857 // JNI part. 1858 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 1859 1860 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 1861 1862 // Return the new bottom. 1863 return sp8; 1864 } 1865 1866 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE; 1867 1868 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
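  // For example, a non-static native method with shorty "JI" has the native signature
  //   jlong f(JNIEnv*, jobject, jint);
  // so the header contributes one pointer (JNIEnv*) and one handle (jobject, or jclass for a
  // static method) before the shorty-derived arguments. Both are omitted for @CriticalNative.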
1869 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE 1870 REQUIRES_SHARED(Locks::mutator_lock_); 1871 1872 private: 1873 uint32_t num_handle_scope_references_; 1874 const bool critical_native_; 1875}; 1876 1877uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) { 1878 num_handle_scope_references_++; 1879 return reinterpret_cast<uintptr_t>(nullptr); 1880} 1881 1882void ComputeGenericJniFrameSize::WalkHeader( 1883 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) { 1884 // First 2 parameters are always excluded for @CriticalNative. 1885 if (UNLIKELY(critical_native_)) { 1886 return; 1887 } 1888 1889 // JNIEnv 1890 sm->AdvancePointer(nullptr); 1891 1892 // Class object or this as first argument 1893 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678)); 1894} 1895 1896// Class to push values to three separate regions. Used to fill the native call part. Adheres to 1897// the template requirements of BuildGenericJniFrameStateMachine. 1898class FillNativeCall { 1899 public: 1900 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) : 1901 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {} 1902 1903 virtual ~FillNativeCall() {} 1904 1905 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) { 1906 cur_gpr_reg_ = gpr_regs; 1907 cur_fpr_reg_ = fpr_regs; 1908 cur_stack_arg_ = stack_args; 1909 } 1910 1911 void PushGpr(uintptr_t val) { 1912 *cur_gpr_reg_ = val; 1913 cur_gpr_reg_++; 1914 } 1915 1916 void PushFpr4(float val) { 1917 *cur_fpr_reg_ = val; 1918 cur_fpr_reg_++; 1919 } 1920 1921 void PushFpr8(uint64_t val) { 1922 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_); 1923 *tmp = val; 1924 cur_fpr_reg_ += 2; 1925 } 1926 1927 void PushStack(uintptr_t val) { 1928 *cur_stack_arg_ = val; 1929 cur_stack_arg_++; 1930 } 1931 1932 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) { 1933 LOG(FATAL) << "(Non-JNI) Native call does not use handles."; 1934 UNREACHABLE(); 1935 } 1936 1937 private: 1938 uintptr_t* cur_gpr_reg_; 1939 uint32_t* cur_fpr_reg_; 1940 uintptr_t* cur_stack_arg_; 1941}; 1942 1943// Visits arguments on the stack placing them into a region lower down the stack for the benefit 1944// of transitioning into native code. 1945class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { 1946 public: 1947 BuildGenericJniFrameVisitor(Thread* self, 1948 bool is_static, 1949 bool critical_native, 1950 const char* shorty, 1951 uint32_t shorty_len, 1952 ArtMethod*** sp) 1953 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 1954 jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native), 1955 sm_(&jni_call_) { 1956 ComputeGenericJniFrameSize fsc(critical_native); 1957 uintptr_t* start_gpr_reg; 1958 uint32_t* start_fpr_reg; 1959 uintptr_t* start_stack_arg; 1960 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 1961 &handle_scope_, 1962 &start_stack_arg, 1963 &start_gpr_reg, &start_fpr_reg); 1964 1965 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 1966 1967 // First 2 parameters are always excluded for CriticalNative methods. 1968 if (LIKELY(!critical_native)) { 1969 // jni environment is always first argument 1970 sm_.AdvancePointer(self->GetJniEnv()); 1971 1972 if (is_static) { 1973 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); 1974 } // else "this" reference is already handled by QuickArgumentVisitor. 
1975 } 1976 } 1977 1978 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 1979 1980 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); 1981 1982 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 1983 return handle_scope_->GetHandle(0).GetReference(); 1984 } 1985 1986 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) { 1987 return handle_scope_->GetHandle(0).ToJObject(); 1988 } 1989 1990 void* GetBottomOfUsedArea() const { 1991 return bottom_of_used_area_; 1992 } 1993 1994 private: 1995 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 1996 class FillJniCall FINAL : public FillNativeCall { 1997 public: 1998 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 1999 HandleScope* handle_scope, bool critical_native) 2000 : FillNativeCall(gpr_regs, fpr_regs, stack_args), 2001 handle_scope_(handle_scope), 2002 cur_entry_(0), 2003 critical_native_(critical_native) {} 2004 2005 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); 2006 2007 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 2008 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 2009 handle_scope_ = scope; 2010 cur_entry_ = 0U; 2011 } 2012 2013 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) { 2014 // Initialize padding entries. 2015 size_t expected_slots = handle_scope_->NumberOfReferences(); 2016 while (cur_entry_ < expected_slots) { 2017 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr); 2018 } 2019 2020 if (!critical_native_) { 2021 // Non-critical natives have at least the self class (jclass) or this (jobject). 2022 DCHECK_NE(cur_entry_, 0U); 2023 } 2024 } 2025 2026 bool CriticalNative() const { 2027 return critical_native_; 2028 } 2029 2030 private: 2031 HandleScope* handle_scope_; 2032 size_t cur_entry_; 2033 const bool critical_native_; 2034 }; 2035 2036 HandleScope* handle_scope_; 2037 FillJniCall jni_call_; 2038 void* bottom_of_used_area_; 2039 2040 BuildNativeCallFrameStateMachine<FillJniCall> sm_; 2041 2042 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor); 2043}; 2044 2045uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) { 2046 uintptr_t tmp; 2047 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_); 2048 h.Assign(ref); 2049 tmp = reinterpret_cast<uintptr_t>(h.ToJObject()); 2050 cur_entry_++; 2051 return tmp; 2052} 2053 2054void BuildGenericJniFrameVisitor::Visit() { 2055 Primitive::Type type = GetParamPrimitiveType(); 2056 switch (type) { 2057 case Primitive::kPrimLong: { 2058 jlong long_arg; 2059 if (IsSplitLongOrDouble()) { 2060 long_arg = ReadSplitLongParam(); 2061 } else { 2062 long_arg = *reinterpret_cast<jlong*>(GetParamAddress()); 2063 } 2064 sm_.AdvanceLong(long_arg); 2065 break; 2066 } 2067 case Primitive::kPrimDouble: { 2068 uint64_t double_arg; 2069 if (IsSplitLongOrDouble()) { 2070 // Read into union so that we don't case to a double. 
2071        double_arg = ReadSplitLongParam();
2072      } else {
2073        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
2074      }
2075      sm_.AdvanceDouble(double_arg);
2076      break;
2077    }
2078    case Primitive::kPrimNot: {
2079      StackReference<mirror::Object>* stack_ref =
2080          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
2081      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
2082      break;
2083    }
2084    case Primitive::kPrimFloat:
2085      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
2086      break;
2087    case Primitive::kPrimBoolean:  // Fall-through.
2088    case Primitive::kPrimByte:     // Fall-through.
2089    case Primitive::kPrimChar:     // Fall-through.
2090    case Primitive::kPrimShort:    // Fall-through.
2091    case Primitive::kPrimInt:      // Fall-through.
2092      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
2093      break;
2094    case Primitive::kPrimVoid:
2095      LOG(FATAL) << "UNREACHABLE";
2096      UNREACHABLE();
2097  }
2098}
2099
2100void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
2101  // Clear out the rest of the scope.
2102  jni_call_.ResetRemainingScopeSlots();
2103  if (!jni_call_.CriticalNative()) {
2104    // Install HandleScope.
2105    self->PushHandleScope(handle_scope_);
2106  }
2107}
2108
2109#if defined(__arm__) || defined(__aarch64__)
2110extern "C" const void* artFindNativeMethod();
2111#else
2112extern "C" const void* artFindNativeMethod(Thread* self);
2113#endif
2114
2115static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
2116                                            uint32_t cookie,
2117                                            bool fast_native ATTRIBUTE_UNUSED,
2118                                            jobject l,
2119                                            jobject lock) {
2120  // TODO: add entrypoints for @FastNative returning objects.
2121  if (lock != nullptr) {
2122    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
2123  } else {
2124    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
2125  }
2126}
2127
2128static void artQuickGenericJniEndJNINonRef(Thread* self,
2129                                           uint32_t cookie,
2130                                           bool fast_native,
2131                                           jobject lock) {
2132  if (lock != nullptr) {
2133    JniMethodEndSynchronized(cookie, lock, self);
2134    // Ignore "fast_native" here because synchronized functions aren't very fast.
2135  } else {
2136    if (UNLIKELY(fast_native)) {
2137      JniMethodFastEnd(cookie, self);
2138    } else {
2139      JniMethodEnd(cookie, self);
2140    }
2141  }
2142}
2143
2144/*
2145 * Initializes an alloca region assumed to be directly below sp for a native call:
2146 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
2147 * The native code to invoke and the bottom of the reserved area are returned to the assembly stub.
2148 *
2149 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
2150 * We need to fix this, as the handle scope needs to go into the callee-save frame.
2151 *
2152 * The TwoWordReturn of this function denotes:
2153 * 1) The native code to invoke (lo) and the bottom of the used alloca area (hi), on success.
2154 * 2) The failure value, if an exception is pending.
2155 */
2156extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
2157    REQUIRES_SHARED(Locks::mutator_lock_) {
2158  // Note: We cannot walk the stack properly until fixed up below.
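  // Overview of the steps below:
  //   1) Build the generic JNI frame (handle scope, register arrays, out args) with
  //      BuildGenericJniFrameVisitor; this also updates sp.
  //   2) Publish the new top quick frame so the stack becomes walkable.
  //   3) Call the matching JniMethodStart* variant (skipped for @CriticalNative) and save the
  //      local reference cookie just below the method slot.
  //   4) If the stored entrypoint is still the dlsym lookup stub, resolve the native code via
  //      artFindNativeMethod().
  //   5) Hand the native code and the bottom of the used area back to the assembly stub as a
  //      TwoWordReturn.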
2159 ArtMethod* called = *sp; 2160 DCHECK(called->IsNative()) << called->PrettyMethod(true); 2161 Runtime* runtime = Runtime::Current(); 2162 jit::Jit* jit = runtime->GetJit(); 2163 if (jit != nullptr) { 2164 jit->AddSamples(self, called, 1u, /*with_backedges*/ false); 2165 } 2166 uint32_t shorty_len = 0; 2167 const char* shorty = called->GetShorty(&shorty_len); 2168 bool critical_native = called->IsCriticalNative(); 2169 bool fast_native = called->IsFastNative(); 2170 bool normal_native = !critical_native && !fast_native; 2171 2172 // Run the visitor and update sp. 2173 BuildGenericJniFrameVisitor visitor(self, 2174 called->IsStatic(), 2175 critical_native, 2176 shorty, 2177 shorty_len, 2178 &sp); 2179 { 2180 ScopedAssertNoThreadSuspension sants(__FUNCTION__); 2181 visitor.VisitArguments(); 2182 // FinalizeHandleScope pushes the handle scope on the thread. 2183 visitor.FinalizeHandleScope(self); 2184 } 2185 2186 // Fix up managed-stack things in Thread. After this we can walk the stack. 2187 self->SetTopOfStackTagged(sp); 2188 2189 self->VerifyStack(); 2190 2191 uint32_t cookie; 2192 uint32_t* sp32; 2193 // Skip calling JniMethodStart for @CriticalNative. 2194 if (LIKELY(!critical_native)) { 2195 // Start JNI, save the cookie. 2196 if (called->IsSynchronized()) { 2197 DCHECK(normal_native) << " @FastNative and synchronize is not supported"; 2198 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 2199 if (self->IsExceptionPending()) { 2200 self->PopHandleScope(); 2201 // A negative value denotes an error. 2202 return GetTwoWordFailureValue(); 2203 } 2204 } else { 2205 if (fast_native) { 2206 cookie = JniMethodFastStart(self); 2207 } else { 2208 DCHECK(normal_native); 2209 cookie = JniMethodStart(self); 2210 } 2211 } 2212 sp32 = reinterpret_cast<uint32_t*>(sp); 2213 *(sp32 - 1) = cookie; 2214 } 2215 2216 // Retrieve the stored native code. 2217 void const* nativeCode = called->GetEntryPointFromJni(); 2218 2219 // There are two cases for the content of nativeCode: 2220 // 1) Pointer to the native function. 2221 // 2) Pointer to the trampoline for native code binding. 2222 // In the second case, we need to execute the binding and continue with the actual native function 2223 // pointer. 2224 DCHECK(nativeCode != nullptr); 2225 if (nativeCode == GetJniDlsymLookupStub()) { 2226#if defined(__arm__) || defined(__aarch64__) 2227 nativeCode = artFindNativeMethod(); 2228#else 2229 nativeCode = artFindNativeMethod(self); 2230#endif 2231 2232 if (nativeCode == nullptr) { 2233 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 2234 2235 // @CriticalNative calls do not need to call back into JniMethodEnd. 2236 if (LIKELY(!critical_native)) { 2237 // End JNI, as the assembly will move to deliver the exception. 2238 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 2239 if (shorty[0] == 'L') { 2240 artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock); 2241 } else { 2242 artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock); 2243 } 2244 } 2245 2246 return GetTwoWordFailureValue(); 2247 } 2248 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 2249 } 2250 2251#if defined(__mips__) && !defined(__LP64__) 2252 // On MIPS32 if the first two arguments are floating-point, we need to know their types 2253 // so that art_quick_generic_jni_trampoline can correctly extract them from the stack 2254 // and load into floating-point registers. 
2255 // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU 2256 // view): 2257 // (1) 2258 // | DOUBLE | DOUBLE | other args, if any 2259 // | F12 | F13 | F14 | F15 | 2260 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2261 // (2) 2262 // | DOUBLE | FLOAT | (PAD) | other args, if any 2263 // | F12 | F13 | F14 | | 2264 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2265 // (3) 2266 // | FLOAT | (PAD) | DOUBLE | other args, if any 2267 // | F12 | | F14 | F15 | 2268 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2269 // (4) 2270 // | FLOAT | FLOAT | other args, if any 2271 // | F12 | F14 | 2272 // | SP+0 | SP+4 | SP+8 2273 // As you can see, only the last case (4) is special. In all others we can just 2274 // load F12/F13 and F14/F15 in the same manner. 2275 // Set bit 0 of the native code address to 1 in this case (valid code addresses 2276 // are always a multiple of 4 on MIPS32, so we have 2 spare bits available). 2277 if (nativeCode != nullptr && 2278 shorty != nullptr && 2279 shorty_len >= 3 && 2280 shorty[1] == 'F' && 2281 shorty[2] == 'F') { 2282 nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1); 2283 } 2284#endif 2285 2286 // Return native code addr(lo) and bottom of alloca address(hi). 2287 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 2288 reinterpret_cast<uintptr_t>(nativeCode)); 2289} 2290 2291// Defined in quick_jni_entrypoints.cc. 2292extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie, 2293 jvalue result, uint64_t result_f, ArtMethod* called, 2294 HandleScope* handle_scope); 2295/* 2296 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 2297 * unlocking. 2298 */ 2299extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, 2300 jvalue result, 2301 uint64_t result_f) { 2302 // We're here just back from a native call. We don't have the shared mutator lock at this point 2303 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing 2304 // anything that requires a mutator lock before that would cause problems as GC may have the 2305 // exclusive mutator lock and may be moving objects, etc. 2306 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); 2307 DCHECK(self->GetManagedStack()->GetTopQuickFrameTag()); 2308 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 2309 ArtMethod* called = *sp; 2310 uint32_t cookie = *(sp32 - 1); 2311 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp)); 2312 return GenericJniMethodEnd(self, cookie, result, result_f, called, table); 2313} 2314 2315// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 2316// for the method pointer. 2317// 2318// It is valid to use this, as at the usage points here (returns from C functions) we are assuming 2319// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations). 
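// Conceptually, the calling assembly stub consumes the pair roughly as follows (a sketch; the
// real glue lives in the per-architecture invoke stubs):
//   TwoWordReturn r = artInvoke...TrampolineWithAccessCheck(...);
//   on failure  -> deliver the pending exception;
//   on success  -> install the ArtMethod* (lo) as the method argument and branch to the code
//                  address (hi).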
2320 2321template <InvokeType type, bool access_check> 2322static TwoWordReturn artInvokeCommon(uint32_t method_idx, 2323 ObjPtr<mirror::Object> this_object, 2324 Thread* self, 2325 ArtMethod** sp) { 2326 ScopedQuickEntrypointChecks sqec(self); 2327 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2328 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2329 ArtMethod* method = FindMethodFast<type, access_check>(method_idx, this_object, caller_method); 2330 if (UNLIKELY(method == nullptr)) { 2331 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); 2332 uint32_t shorty_len; 2333 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 2334 { 2335 // Remember the args in case a GC happens in FindMethodFromCode. 2336 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2337 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 2338 visitor.VisitArguments(); 2339 method = FindMethodFromCode<type, access_check>(method_idx, 2340 &this_object, 2341 caller_method, 2342 self); 2343 visitor.FixupReferences(); 2344 } 2345 2346 if (UNLIKELY(method == nullptr)) { 2347 CHECK(self->IsExceptionPending()); 2348 return GetTwoWordFailureValue(); // Failure. 2349 } 2350 } 2351 DCHECK(!self->IsExceptionPending()); 2352 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2353 2354 // When we return, the caller will branch to this address, so it had better not be 0! 2355 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2356 << " location: " 2357 << method->GetDexFile()->GetLocation(); 2358 2359 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2360 reinterpret_cast<uintptr_t>(method)); 2361} 2362 2363// Explicit artInvokeCommon template function declarations to please analysis tool. 
2364#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                          \
2365  template REQUIRES_SHARED(Locks::mutator_lock_)                                           \
2366  TwoWordReturn artInvokeCommon<type, access_check>(                                       \
2367      uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
2368
2369EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
2370EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
2371EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
2372EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
2373EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
2374EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
2375EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
2376EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
2377EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
2378EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
2379#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
2380
2381// See comments in runtime_support_asm.S
2382extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
2383    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2384    REQUIRES_SHARED(Locks::mutator_lock_) {
2385  return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
2386}
2387
2388extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
2389    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2390    REQUIRES_SHARED(Locks::mutator_lock_) {
2391  return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
2392}
2393
2394extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
2395    uint32_t method_idx,
2396    mirror::Object* this_object ATTRIBUTE_UNUSED,
2397    Thread* self,
2398    ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
2399  // For static calls, this_object is not required and may be random garbage. Don't pass it down
2400  // so that it doesn't trip the ObjPtr alignment check.
2401  return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
2402}
2403
2404extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
2405    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2406    REQUIRES_SHARED(Locks::mutator_lock_) {
2407  return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
2408}
2409
2410extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
2411    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
2412    REQUIRES_SHARED(Locks::mutator_lock_) {
2413  return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
2414}
2415
2416// Helper function for art_quick_imt_conflict_trampoline to look up the interface method.
2417extern "C" ArtMethod* artLookupResolvedMethod(uint32_t method_index, ArtMethod* referrer)
2418    REQUIRES_SHARED(Locks::mutator_lock_) {
2419  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
2420  DCHECK(!referrer->IsProxyMethod());
2421  ArtMethod* result = Runtime::Current()->GetClassLinker()->LookupResolvedMethod(
2422      method_index, referrer->GetDexCache(), referrer->GetClassLoader());
2423  DCHECK(result == nullptr ||
2424         result->GetDeclaringClass()->IsInterface() ||
2425         result->GetDeclaringClass() ==
2426             WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object))
2427      << result->PrettyMethod();
2428  return result;
2429}
2430
2431// Determine target of interface dispatch. The interface method and this object are known non-null.
2432// The interface method is the method returned by the dex cache in the conflict trampoline.
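// In outline: 1) if |interface_method| is null, decode the invoke-interface instruction at the
// caller's dex pc and resolve it with the ClassLinker (keeping the stacked args visible to the
// GC); 2) probe the receiver class's IMT slot for the method's IMT index - a runtime (conflict)
// method there means a lookup in its ImtConflictTable, while a concrete method can be used
// directly; 3) on a miss, fall back to FindVirtualMethodForInterface (throwing
// IncompatibleClassChangeError if the class does not implement the method) and cache the new
// (interface method, target) pair in the conflict table; 4) return the target's code and
// ArtMethod* as a TwoWordReturn.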
2433extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method, 2434 mirror::Object* raw_this_object, 2435 Thread* self, 2436 ArtMethod** sp) 2437 REQUIRES_SHARED(Locks::mutator_lock_) { 2438 ScopedQuickEntrypointChecks sqec(self); 2439 StackHandleScope<2> hs(self); 2440 Handle<mirror::Object> this_object = hs.NewHandle(raw_this_object); 2441 Handle<mirror::Class> cls = hs.NewHandle(this_object->GetClass()); 2442 2443 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2444 ArtMethod* method = nullptr; 2445 ImTable* imt = cls->GetImt(kRuntimePointerSize); 2446 2447 if (UNLIKELY(interface_method == nullptr)) { 2448 // The interface method is unresolved, so resolve it in the dex file of the caller. 2449 // Fetch the dex_method_idx of the target interface method from the caller. 2450 uint32_t dex_method_idx; 2451 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2452 const Instruction& instr = caller_method->DexInstructions().InstructionAt(dex_pc); 2453 Instruction::Code instr_code = instr.Opcode(); 2454 DCHECK(instr_code == Instruction::INVOKE_INTERFACE || 2455 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 2456 << "Unexpected call into interface trampoline: " << instr.DumpString(nullptr); 2457 if (instr_code == Instruction::INVOKE_INTERFACE) { 2458 dex_method_idx = instr.VRegB_35c(); 2459 } else { 2460 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 2461 dex_method_idx = instr.VRegB_3rc(); 2462 } 2463 2464 const DexFile& dex_file = caller_method->GetDeclaringClass()->GetDexFile(); 2465 uint32_t shorty_len; 2466 const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_idx), 2467 &shorty_len); 2468 { 2469 // Remember the args in case a GC happens in ClassLinker::ResolveMethod(). 2470 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2471 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 2472 visitor.VisitArguments(); 2473 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2474 interface_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( 2475 self, dex_method_idx, caller_method, kInterface); 2476 visitor.FixupReferences(); 2477 } 2478 2479 if (UNLIKELY(interface_method == nullptr)) { 2480 CHECK(self->IsExceptionPending()); 2481 return GetTwoWordFailureValue(); // Failure. 2482 } 2483 } 2484 2485 DCHECK(!interface_method->IsRuntimeMethod()); 2486 // Look whether we have a match in the ImtConflictTable. 2487 uint32_t imt_index = ImTable::GetImtIndex(interface_method); 2488 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize); 2489 if (LIKELY(conflict_method->IsRuntimeMethod())) { 2490 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize); 2491 DCHECK(current_table != nullptr); 2492 method = current_table->Lookup(interface_method, kRuntimePointerSize); 2493 } else { 2494 // It seems we aren't really a conflict method! 
2495 if (kIsDebugBuild) { 2496 ArtMethod* m = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2497 CHECK_EQ(conflict_method, m) 2498 << interface_method->PrettyMethod() << " / " << conflict_method->PrettyMethod() << " / " 2499 << " / " << ArtMethod::PrettyMethod(m) << " / " << cls->PrettyClass(); 2500 } 2501 method = conflict_method; 2502 } 2503 if (method != nullptr) { 2504 return GetTwoWordSuccessValue( 2505 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()), 2506 reinterpret_cast<uintptr_t>(method)); 2507 } 2508 2509 // No match, use the IfTable. 2510 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2511 if (UNLIKELY(method == nullptr)) { 2512 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( 2513 interface_method, this_object.Get(), caller_method); 2514 return GetTwoWordFailureValue(); // Failure. 2515 } 2516 2517 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2518 // We create a new table with the new pair { interface_method, method }. 2519 DCHECK(conflict_method->IsRuntimeMethod()); 2520 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2521 cls.Get(), 2522 conflict_method, 2523 interface_method, 2524 method, 2525 /*force_new_conflict_method*/false); 2526 if (new_conflict_method != conflict_method) { 2527 // Update the IMT if we create a new conflict method. No fence needed here, as the 2528 // data is consistent. 2529 imt->Set(imt_index, 2530 new_conflict_method, 2531 kRuntimePointerSize); 2532 } 2533 2534 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2535 2536 // When we return, the caller will branch to this address, so it had better not be 0! 2537 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2538 << " location: " << method->GetDexFile()->GetLocation(); 2539 2540 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2541 reinterpret_cast<uintptr_t>(method)); 2542} 2543 2544// Returns shorty type so the caller can determine how to put |result| 2545// into expected registers. The shorty type is static so the compiler 2546// could call different flavors of this code path depending on the 2547// shorty type though this would require different entry points for 2548// each type. 2549extern "C" uintptr_t artInvokePolymorphic( 2550 JValue* result, 2551 mirror::Object* raw_receiver, 2552 Thread* self, 2553 ArtMethod** sp) 2554 REQUIRES_SHARED(Locks::mutator_lock_) { 2555 ScopedQuickEntrypointChecks sqec(self); 2556 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2557 2558 // Start new JNI local reference state 2559 JNIEnvExt* env = self->GetJniEnv(); 2560 ScopedObjectAccessUnchecked soa(env); 2561 ScopedJniEnvLocalRefState env_state(env); 2562 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe."); 2563 2564 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC. 
2565 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2566 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2567 const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc); 2568 DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC || 2569 inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2570 const uint32_t proto_idx = inst.VRegH(); 2571 const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx); 2572 const size_t shorty_length = strlen(shorty); 2573 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static. 2574 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa); 2575 gc_visitor.VisitArguments(); 2576 2577 // Wrap raw_receiver in a Handle for safety. 2578 StackHandleScope<3> hs(self); 2579 Handle<mirror::Object> receiver_handle(hs.NewHandle(raw_receiver)); 2580 raw_receiver = nullptr; 2581 self->EndAssertNoThreadSuspension(old_cause); 2582 2583 // Resolve method. 2584 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 2585 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( 2586 self, inst.VRegB(), caller_method, kVirtual); 2587 2588 if (UNLIKELY(receiver_handle.IsNull())) { 2589 ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual); 2590 return static_cast<uintptr_t>('V'); 2591 } 2592 2593 // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996). 2594 DCHECK_EQ(resolved_method->GetDeclaringClass(), 2595 WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle)); 2596 2597 Handle<mirror::MethodHandle> method_handle(hs.NewHandle( 2598 ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get())))); 2599 2600 Handle<mirror::MethodType> method_type( 2601 hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method))); 2602 2603 // This implies we couldn't resolve one or more types in this method handle. 2604 if (UNLIKELY(method_type.IsNull())) { 2605 CHECK(self->IsExceptionPending()); 2606 return static_cast<uintptr_t>('V'); 2607 } 2608 2609 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA()); 2610 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic); 2611 2612 // Fix references before constructing the shadow frame. 2613 gc_visitor.FixupReferences(); 2614 2615 // Construct shadow frame placing arguments consecutively from |first_arg|. 2616 const bool is_range = (inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2617 const size_t num_vregs = is_range ? inst.VRegA_4rcc() : inst.VRegA_45cc(); 2618 const size_t first_arg = 0; 2619 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 2620 CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc); 2621 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 2622 ScopedStackedShadowFramePusher 2623 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); 2624 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, 2625 kMethodIsStatic, 2626 shorty, 2627 strlen(shorty), 2628 shadow_frame, 2629 first_arg); 2630 shadow_frame_builder.VisitArguments(); 2631 2632 // Push a transition back into managed code onto the linked list in thread. 2633 ManagedStack fragment; 2634 self->PushManagedStackFragment(&fragment); 2635 2636 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in 2637 // consecutive order. 
2638 RangeInstructionOperands operands(first_arg + 1, num_vregs - 1); 2639 bool isExact = (jni::EncodeArtMethod(resolved_method) == 2640 WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact); 2641 bool success = false; 2642 if (isExact) { 2643 success = MethodHandleInvokeExact(self, 2644 *shadow_frame, 2645 method_handle, 2646 method_type, 2647 &operands, 2648 result); 2649 } else { 2650 success = MethodHandleInvoke(self, 2651 *shadow_frame, 2652 method_handle, 2653 method_type, 2654 &operands, 2655 result); 2656 } 2657 DCHECK(success || self->IsExceptionPending()); 2658 2659 // Pop transition record. 2660 self->PopManagedStackFragment(fragment); 2661 2662 return static_cast<uintptr_t>(shorty[0]); 2663} 2664 2665} // namespace art 2666