quick_trampoline_entrypoints.cc revision 0d2323e9fc700388ac0134390819c653c7893204
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "art_method-inl.h" 18#include "base/callee_save_type.h" 19#include "base/enums.h" 20#include "callee_save_frame.h" 21#include "common_throws.h" 22#include "debugger.h" 23#include "dex_file-inl.h" 24#include "dex_instruction-inl.h" 25#include "entrypoints/entrypoint_utils-inl.h" 26#include "entrypoints/runtime_asm_entrypoints.h" 27#include "gc/accounting/card_table-inl.h" 28#include "imt_conflict_table.h" 29#include "imtable-inl.h" 30#include "interpreter/interpreter.h" 31#include "instrumentation.h" 32#include "linear_alloc.h" 33#include "method_bss_mapping.h" 34#include "method_handles.h" 35#include "method_reference.h" 36#include "mirror/class-inl.h" 37#include "mirror/dex_cache-inl.h" 38#include "mirror/method.h" 39#include "mirror/method_handle_impl.h" 40#include "mirror/object-inl.h" 41#include "mirror/object_array-inl.h" 42#include "oat_file.h" 43#include "oat_quick_method_header.h" 44#include "quick_exception_handler.h" 45#include "runtime.h" 46#include "scoped_thread_state_change-inl.h" 47#include "stack.h" 48#include "thread-inl.h" 49#include "well_known_classes.h" 50 51namespace art { 52 53// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame. 54class QuickArgumentVisitor { 55 // Number of bytes for each out register in the caller method's frame. 56 static constexpr size_t kBytesStackArgLocation = 4; 57 // Frame size in bytes of a callee-save frame for RefsAndArgs. 58 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 59 GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs); 60#if defined(__arm__) 61 // The callee save frame is pointed to by SP. 62 // | argN | | 63 // | ... | | 64 // | arg4 | | 65 // | arg3 spill | | Caller's frame 66 // | arg2 spill | | 67 // | arg1 spill | | 68 // | Method* | --- 69 // | LR | 70 // | ... | 4x6 bytes callee saves 71 // | R3 | 72 // | R2 | 73 // | R1 | 74 // | S15 | 75 // | : | 76 // | S0 | 77 // | | 4x2 bytes padding 78 // | Method* | <- sp 79 static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat; 80 static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat; 81 static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat; 82 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat; 83 static constexpr bool kQuickSkipOddFpRegisters = false; 84 static constexpr size_t kNumQuickGprArgs = 3; 85 static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16; 86 static constexpr bool kGprFprLockstep = false; 87 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 88 arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first FPR arg. 89 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 90 arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first GPR arg. 91 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 92 arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); // Offset of return address. 93 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 94 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 95 } 96#elif defined(__aarch64__) 97 // The callee save frame is pointed to by SP. 98 // | argN | | 99 // | ... | | 100 // | arg4 | | 101 // | arg3 spill | | Caller's frame 102 // | arg2 spill | | 103 // | arg1 spill | | 104 // | Method* | --- 105 // | LR | 106 // | X29 | 107 // | : | 108 // | X20 | 109 // | X7 | 110 // | : | 111 // | X1 | 112 // | D7 | 113 // | : | 114 // | D0 | 115 // | | padding 116 // | Method* | <- sp 117 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 118 static constexpr bool kAlignPairRegister = false; 119 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 120 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 121 static constexpr bool kQuickSkipOddFpRegisters = false; 122 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 123 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 124 static constexpr bool kGprFprLockstep = false; 125 // Offset of first FPR arg. 126 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 127 arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 128 // Offset of first GPR arg. 129 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 130 arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); 131 // Offset of return address. 132 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 133 arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); 134 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 135 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 136 } 137#elif defined(__mips__) && !defined(__LP64__) 138 // The callee save frame is pointed to by SP. 139 // | argN | | 140 // | ... | | 141 // | arg4 | | 142 // | arg3 spill | | Caller's frame 143 // | arg2 spill | | 144 // | arg1 spill | | 145 // | Method* | --- 146 // | RA | 147 // | ... | callee saves 148 // | T1 | arg5 149 // | T0 | arg4 150 // | A3 | arg3 151 // | A2 | arg2 152 // | A1 | arg1 153 // | F19 | 154 // | F18 | f_arg5 155 // | F17 | 156 // | F16 | f_arg4 157 // | F15 | 158 // | F14 | f_arg3 159 // | F13 | 160 // | F12 | f_arg2 161 // | F11 | 162 // | F10 | f_arg1 163 // | F9 | 164 // | F8 | f_arg0 165 // | | padding 166 // | A0/Method* | <- sp 167 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 168 static constexpr bool kAlignPairRegister = true; 169 static constexpr bool kQuickSoftFloatAbi = false; 170 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 171 static constexpr bool kQuickSkipOddFpRegisters = true; 172 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 173 static constexpr size_t kNumQuickFprArgs = 12; // 6 arguments passed in FPRs. Floats can be 174 // passed only in even numbered registers and each 175 // double occupies two registers. 176 static constexpr bool kGprFprLockstep = false; 177 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8; // Offset of first FPR arg. 178 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56; // Offset of first GPR arg. 179 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108; // Offset of return address. 180 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 181 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 182 } 183#elif defined(__mips__) && defined(__LP64__) 184 // The callee save frame is pointed to by SP. 185 // | argN | | 186 // | ... | | 187 // | arg4 | | 188 // | arg3 spill | | Caller's frame 189 // | arg2 spill | | 190 // | arg1 spill | | 191 // | Method* | --- 192 // | RA | 193 // | ... | callee saves 194 // | A7 | arg7 195 // | A6 | arg6 196 // | A5 | arg5 197 // | A4 | arg4 198 // | A3 | arg3 199 // | A2 | arg2 200 // | A1 | arg1 201 // | F19 | f_arg7 202 // | F18 | f_arg6 203 // | F17 | f_arg5 204 // | F16 | f_arg4 205 // | F15 | f_arg3 206 // | F14 | f_arg2 207 // | F13 | f_arg1 208 // | F12 | f_arg0 209 // | | padding 210 // | A0/Method* | <- sp 211 // NOTE: for Mip64, when A0 is skipped, F12 is also skipped. 212 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 213 static constexpr bool kAlignPairRegister = false; 214 static constexpr bool kQuickSoftFloatAbi = false; 215 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 216 static constexpr bool kQuickSkipOddFpRegisters = false; 217 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 218 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs. 219 static constexpr bool kGprFprLockstep = true; 220 221 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F13). 222 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1). 223 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address. 224 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 225 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 226 } 227#elif defined(__i386__) 228 // The callee save frame is pointed to by SP. 229 // | argN | | 230 // | ... | | 231 // | arg4 | | 232 // | arg3 spill | | Caller's frame 233 // | arg2 spill | | 234 // | arg1 spill | | 235 // | Method* | --- 236 // | Return | 237 // | EBP,ESI,EDI | callee saves 238 // | EBX | arg3 239 // | EDX | arg2 240 // | ECX | arg1 241 // | XMM3 | float arg 4 242 // | XMM2 | float arg 3 243 // | XMM1 | float arg 2 244 // | XMM0 | float arg 1 245 // | EAX/Method* | <- sp 246 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 247 static constexpr bool kAlignPairRegister = false; 248 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 249 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 250 static constexpr bool kQuickSkipOddFpRegisters = false; 251 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 252 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs. 253 static constexpr bool kGprFprLockstep = false; 254 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg. 255 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg. 256 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address. 257 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 258 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 259 } 260#elif defined(__x86_64__) 261 // The callee save frame is pointed to by SP. 262 // | argN | | 263 // | ... | | 264 // | reg. arg spills | | Caller's frame 265 // | Method* | --- 266 // | Return | 267 // | R15 | callee save 268 // | R14 | callee save 269 // | R13 | callee save 270 // | R12 | callee save 271 // | R9 | arg5 272 // | R8 | arg4 273 // | RSI/R6 | arg1 274 // | RBP/R5 | callee save 275 // | RBX/R3 | callee save 276 // | RDX/R2 | arg2 277 // | RCX/R1 | arg3 278 // | XMM7 | float arg 8 279 // | XMM6 | float arg 7 280 // | XMM5 | float arg 6 281 // | XMM4 | float arg 5 282 // | XMM3 | float arg 4 283 // | XMM2 | float arg 3 284 // | XMM1 | float arg 2 285 // | XMM0 | float arg 1 286 // | Padding | 287 // | RDI/Method* | <- sp 288 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 289 static constexpr bool kAlignPairRegister = false; 290 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 291 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 292 static constexpr bool kQuickSkipOddFpRegisters = false; 293 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 294 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 295 static constexpr bool kGprFprLockstep = false; 296 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg. 297 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg. 298 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address. 299 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 300 switch (gpr_index) { 301 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA)); 302 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA)); 303 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA)); 304 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA)); 305 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA)); 306 default: 307 LOG(FATAL) << "Unexpected GPR index: " << gpr_index; 308 return 0; 309 } 310 } 311#else 312#error "Unsupported architecture" 313#endif 314 315 public: 316 // Special handling for proxy methods. Proxy methods are instance methods so the 317 // 'this' object is the 1st argument. They also have the same frame layout as the 318 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the 319 // 1st GPR. 320 static mirror::Object* GetProxyThisObject(ArtMethod** sp) 321 REQUIRES_SHARED(Locks::mutator_lock_) { 322 CHECK((*sp)->IsProxyMethod()); 323 CHECK_GT(kNumQuickGprArgs, 0u); 324 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 325 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + 326 GprIndexToGprOffset(kThisGprIndex); 327 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset; 328 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); 329 } 330 331 static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 332 DCHECK((*sp)->IsCalleeSaveMethod()); 333 return GetCalleeSaveMethodCaller(sp, CalleeSaveType::kSaveRefsAndArgs); 334 } 335 336 static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 337 DCHECK((*sp)->IsCalleeSaveMethod()); 338 uint8_t* previous_sp = 339 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; 340 return *reinterpret_cast<ArtMethod**>(previous_sp); 341 } 342 343 static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 344 DCHECK((*sp)->IsCalleeSaveMethod()); 345 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, 346 CalleeSaveType::kSaveRefsAndArgs); 347 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 348 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 349 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 350 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 351 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 352 353 if (current_code->IsOptimized()) { 354 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 355 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 356 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding); 357 DCHECK(stack_map.IsValid()); 358 if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) { 359 InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); 360 return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 361 inline_info.GetDepth(encoding.inline_info.encoding)-1); 362 } else { 363 return stack_map.GetDexPc(encoding.stack_map.encoding); 364 } 365 } else { 366 return current_code->ToDexPc(*caller_sp, outer_pc); 367 } 368 } 369 370 static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index) 371 REQUIRES_SHARED(Locks::mutator_lock_) { 372 DCHECK((*sp)->IsCalleeSaveMethod()); 373 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, 374 CalleeSaveType::kSaveRefsAndArgs); 375 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 376 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 377 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 378 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 379 if (!current_code->IsOptimized()) { 380 return false; 381 } 382 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 383 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 384 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 385 MethodInfo method_info = current_code->GetOptimizedMethodInfo(); 386 InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding)); 387 if (invoke.IsValid()) { 388 *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding)); 389 *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info); 390 return true; 391 } 392 return false; 393 } 394 395 // For the given quick ref and args quick frame, return the caller's PC. 396 static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 397 DCHECK((*sp)->IsCalleeSaveMethod()); 398 uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; 399 return *reinterpret_cast<uintptr_t*>(lr); 400 } 401 402 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 403 uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) : 404 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), 405 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), 406 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), 407 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize 408 + sizeof(ArtMethod*)), // Skip ArtMethod*. 409 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), 410 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { 411 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), 412 "Number of Quick FPR arguments unexpected"); 413 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), 414 "Double alignment unexpected"); 415 // For register alignment, we want to assume that counters(fpr_double_index_) are even if the 416 // next register is even. 417 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, 418 "Number of Quick FPR arguments not even"); 419 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 420 } 421 422 virtual ~QuickArgumentVisitor() {} 423 424 virtual void Visit() = 0; 425 426 Primitive::Type GetParamPrimitiveType() const { 427 return cur_type_; 428 } 429 430 uint8_t* GetParamAddress() const { 431 if (!kQuickSoftFloatAbi) { 432 Primitive::Type type = GetParamPrimitiveType(); 433 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) { 434 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) { 435 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 436 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 437 } 438 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 439 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 440 } 441 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 442 } 443 } 444 if (gpr_index_ < kNumQuickGprArgs) { 445 return gpr_args_ + GprIndexToGprOffset(gpr_index_); 446 } 447 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 448 } 449 450 bool IsSplitLongOrDouble() const { 451 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || 452 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { 453 return is_split_long_or_double_; 454 } else { 455 return false; // An optimization for when GPR and FPRs are 64bit. 456 } 457 } 458 459 bool IsParamAReference() const { 460 return GetParamPrimitiveType() == Primitive::kPrimNot; 461 } 462 463 bool IsParamALongOrDouble() const { 464 Primitive::Type type = GetParamPrimitiveType(); 465 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; 466 } 467 468 uint64_t ReadSplitLongParam() const { 469 // The splitted long is always available through the stack. 470 return *reinterpret_cast<uint64_t*>(stack_args_ 471 + stack_index_ * kBytesStackArgLocation); 472 } 473 474 void IncGprIndex() { 475 gpr_index_++; 476 if (kGprFprLockstep) { 477 fpr_index_++; 478 } 479 } 480 481 void IncFprIndex() { 482 fpr_index_++; 483 if (kGprFprLockstep) { 484 gpr_index_++; 485 } 486 } 487 488 void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) { 489 // (a) 'stack_args_' should point to the first method's argument 490 // (b) whatever the argument type it is, the 'stack_index_' should 491 // be moved forward along with every visiting. 492 gpr_index_ = 0; 493 fpr_index_ = 0; 494 if (kQuickDoubleRegAlignedFloatBackFilled) { 495 fpr_double_index_ = 0; 496 } 497 stack_index_ = 0; 498 if (!is_static_) { // Handle this. 499 cur_type_ = Primitive::kPrimNot; 500 is_split_long_or_double_ = false; 501 Visit(); 502 stack_index_++; 503 if (kNumQuickGprArgs > 0) { 504 IncGprIndex(); 505 } 506 } 507 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) { 508 cur_type_ = Primitive::GetType(shorty_[shorty_index]); 509 switch (cur_type_) { 510 case Primitive::kPrimNot: 511 case Primitive::kPrimBoolean: 512 case Primitive::kPrimByte: 513 case Primitive::kPrimChar: 514 case Primitive::kPrimShort: 515 case Primitive::kPrimInt: 516 is_split_long_or_double_ = false; 517 Visit(); 518 stack_index_++; 519 if (gpr_index_ < kNumQuickGprArgs) { 520 IncGprIndex(); 521 } 522 break; 523 case Primitive::kPrimFloat: 524 is_split_long_or_double_ = false; 525 Visit(); 526 stack_index_++; 527 if (kQuickSoftFloatAbi) { 528 if (gpr_index_ < kNumQuickGprArgs) { 529 IncGprIndex(); 530 } 531 } else { 532 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 533 IncFprIndex(); 534 if (kQuickDoubleRegAlignedFloatBackFilled) { 535 // Double should not overlap with float. 536 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4. 537 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2)); 538 // Float should not overlap with double. 539 if (fpr_index_ % 2 == 0) { 540 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 541 } 542 } else if (kQuickSkipOddFpRegisters) { 543 IncFprIndex(); 544 } 545 } 546 } 547 break; 548 case Primitive::kPrimDouble: 549 case Primitive::kPrimLong: 550 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) { 551 if (cur_type_ == Primitive::kPrimLong && 552#if defined(__mips__) && !defined(__LP64__) 553 (gpr_index_ == 0 || gpr_index_ == 2) && 554#else 555 gpr_index_ == 0 && 556#endif 557 kAlignPairRegister) { 558 // Currently, this is only for ARM and MIPS, where we align long parameters with 559 // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using 560 // R2 (on ARM) or A2(T0) (on MIPS) instead. 561 IncGprIndex(); 562 } 563 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) && 564 ((gpr_index_ + 1) == kNumQuickGprArgs); 565 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) { 566 // We don't want to split this. Pass over this register. 567 gpr_index_++; 568 is_split_long_or_double_ = false; 569 } 570 Visit(); 571 if (kBytesStackArgLocation == 4) { 572 stack_index_+= 2; 573 } else { 574 CHECK_EQ(kBytesStackArgLocation, 8U); 575 stack_index_++; 576 } 577 if (gpr_index_ < kNumQuickGprArgs) { 578 IncGprIndex(); 579 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) { 580 if (gpr_index_ < kNumQuickGprArgs) { 581 IncGprIndex(); 582 } 583 } 584 } 585 } else { 586 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) && 587 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled; 588 Visit(); 589 if (kBytesStackArgLocation == 4) { 590 stack_index_+= 2; 591 } else { 592 CHECK_EQ(kBytesStackArgLocation, 8U); 593 stack_index_++; 594 } 595 if (kQuickDoubleRegAlignedFloatBackFilled) { 596 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 597 fpr_double_index_ += 2; 598 // Float should not overlap with double. 599 if (fpr_index_ % 2 == 0) { 600 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 601 } 602 } 603 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 604 IncFprIndex(); 605 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) { 606 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 607 IncFprIndex(); 608 } 609 } 610 } 611 } 612 break; 613 default: 614 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_; 615 } 616 } 617 } 618 619 protected: 620 const bool is_static_; 621 const char* const shorty_; 622 const uint32_t shorty_len_; 623 624 private: 625 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame. 626 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame. 627 uint8_t* const stack_args_; // Address of stack arguments in caller's frame. 628 uint32_t gpr_index_; // Index into spilled GPRs. 629 // Index into spilled FPRs. 630 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_ 631 // holds a higher register number. 632 uint32_t fpr_index_; 633 // Index into spilled FPRs for aligned double. 634 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in 635 // terms of singles, may be behind fpr_index. 636 uint32_t fpr_double_index_; 637 uint32_t stack_index_; // Index into arguments on the stack. 638 // The current type of argument during VisitArguments. 639 Primitive::Type cur_type_; 640 // Does a 64bit parameter straddle the register and stack arguments? 641 bool is_split_long_or_double_; 642}; 643 644// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It 645// allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 646extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) 647 REQUIRES_SHARED(Locks::mutator_lock_) { 648 return QuickArgumentVisitor::GetProxyThisObject(sp); 649} 650 651// Visits arguments on the stack placing them into the shadow frame. 652class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { 653 public: 654 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, 655 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : 656 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} 657 658 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 659 660 private: 661 ShadowFrame* const sf_; 662 uint32_t cur_reg_; 663 664 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor); 665}; 666 667void BuildQuickShadowFrameVisitor::Visit() { 668 Primitive::Type type = GetParamPrimitiveType(); 669 switch (type) { 670 case Primitive::kPrimLong: // Fall-through. 671 case Primitive::kPrimDouble: 672 if (IsSplitLongOrDouble()) { 673 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam()); 674 } else { 675 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress())); 676 } 677 ++cur_reg_; 678 break; 679 case Primitive::kPrimNot: { 680 StackReference<mirror::Object>* stack_ref = 681 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 682 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr()); 683 } 684 break; 685 case Primitive::kPrimBoolean: // Fall-through. 686 case Primitive::kPrimByte: // Fall-through. 687 case Primitive::kPrimChar: // Fall-through. 688 case Primitive::kPrimShort: // Fall-through. 689 case Primitive::kPrimInt: // Fall-through. 690 case Primitive::kPrimFloat: 691 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress())); 692 break; 693 case Primitive::kPrimVoid: 694 LOG(FATAL) << "UNREACHABLE"; 695 UNREACHABLE(); 696 } 697 ++cur_reg_; 698} 699 700extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) 701 REQUIRES_SHARED(Locks::mutator_lock_) { 702 // Ensure we don't get thread suspension until the object arguments are safely in the shadow 703 // frame. 704 ScopedQuickEntrypointChecks sqec(self); 705 706 if (UNLIKELY(!method->IsInvokable())) { 707 method->ThrowInvocationTimeError(); 708 return 0; 709 } 710 711 JValue tmp_value; 712 ShadowFrame* deopt_frame = self->PopStackedShadowFrame( 713 StackedShadowFrameType::kDeoptimizationShadowFrame, false); 714 ManagedStack fragment; 715 716 DCHECK(!method->IsNative()) << method->PrettyMethod(); 717 uint32_t shorty_len = 0; 718 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 719 const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem(); 720 DCHECK(code_item != nullptr) << method->PrettyMethod(); 721 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 722 723 JValue result; 724 725 if (deopt_frame != nullptr) { 726 // Coming from partial-fragment deopt. 727 728 if (kIsDebugBuild) { 729 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom 730 // of the call-stack) corresponds to the called method. 731 ShadowFrame* linked = deopt_frame; 732 while (linked->GetLink() != nullptr) { 733 linked = linked->GetLink(); 734 } 735 CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " " 736 << ArtMethod::PrettyMethod(linked->GetMethod()); 737 } 738 739 if (VLOG_IS_ON(deopt)) { 740 // Print out the stack to verify that it was a partial-fragment deopt. 741 LOG(INFO) << "Continue-ing from deopt. Stack is:"; 742 QuickExceptionHandler::DumpFramesWithType(self, true); 743 } 744 745 ObjPtr<mirror::Throwable> pending_exception; 746 bool from_code = false; 747 self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code); 748 749 // Push a transition back into managed code onto the linked list in thread. 750 self->PushManagedStackFragment(&fragment); 751 752 // Ensure that the stack is still in order. 753 if (kIsDebugBuild) { 754 class DummyStackVisitor : public StackVisitor { 755 public: 756 explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_) 757 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} 758 759 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 760 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking 761 // logic. Just always say we want to continue. 762 return true; 763 } 764 }; 765 DummyStackVisitor dsv(self); 766 dsv.WalkStack(); 767 } 768 769 // Restore the exception that was pending before deoptimization then interpret the 770 // deoptimized frames. 771 if (pending_exception != nullptr) { 772 self->SetException(pending_exception); 773 } 774 interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result); 775 } else { 776 const char* old_cause = self->StartAssertNoThreadSuspension( 777 "Building interpreter shadow frame"); 778 uint16_t num_regs = code_item->registers_size_; 779 // No last shadow coming from quick. 780 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 781 CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0); 782 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 783 size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; 784 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, 785 shadow_frame, first_arg_reg); 786 shadow_frame_builder.VisitArguments(); 787 const bool needs_initialization = 788 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); 789 // Push a transition back into managed code onto the linked list in thread. 790 self->PushManagedStackFragment(&fragment); 791 self->PushShadowFrame(shadow_frame); 792 self->EndAssertNoThreadSuspension(old_cause); 793 794 if (needs_initialization) { 795 // Ensure static method's class is initialized. 796 StackHandleScope<1> hs(self); 797 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass())); 798 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { 799 DCHECK(Thread::Current()->IsExceptionPending()) 800 << shadow_frame->GetMethod()->PrettyMethod(); 801 self->PopManagedStackFragment(fragment); 802 return 0; 803 } 804 } 805 806 result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame); 807 } 808 809 // Pop transition. 810 self->PopManagedStackFragment(fragment); 811 812 // Request a stack deoptimization if needed 813 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 814 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); 815 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization 816 // should be done and it knows the real return pc. 817 if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) && 818 Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) { 819 if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) { 820 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 821 << caller->PrettyMethod(); 822 } else { 823 // Push the context of the deoptimization stack so we can restore the return value and the 824 // exception before executing the deoptimized frames. 825 self->PushDeoptimizationContext( 826 result, shorty[0] == 'L', /* from_code */ false, self->GetException()); 827 828 // Set special exception to cause deoptimization. 829 self->SetException(Thread::GetDeoptimizationException()); 830 } 831 } 832 833 // No need to restore the args since the method has already been run by the interpreter. 834 return result.GetJ(); 835} 836 837// Visits arguments on the stack placing them into the args vector, Object* arguments are converted 838// to jobjects. 839class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { 840 public: 841 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, 842 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : 843 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} 844 845 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 846 847 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 848 849 private: 850 ScopedObjectAccessUnchecked* const soa_; 851 std::vector<jvalue>* const args_; 852 // References which we must update when exiting in case the GC moved the objects. 853 std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_; 854 855 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); 856}; 857 858void BuildQuickArgumentVisitor::Visit() { 859 jvalue val; 860 Primitive::Type type = GetParamPrimitiveType(); 861 switch (type) { 862 case Primitive::kPrimNot: { 863 StackReference<mirror::Object>* stack_ref = 864 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 865 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 866 references_.push_back(std::make_pair(val.l, stack_ref)); 867 break; 868 } 869 case Primitive::kPrimLong: // Fall-through. 870 case Primitive::kPrimDouble: 871 if (IsSplitLongOrDouble()) { 872 val.j = ReadSplitLongParam(); 873 } else { 874 val.j = *reinterpret_cast<jlong*>(GetParamAddress()); 875 } 876 break; 877 case Primitive::kPrimBoolean: // Fall-through. 878 case Primitive::kPrimByte: // Fall-through. 879 case Primitive::kPrimChar: // Fall-through. 880 case Primitive::kPrimShort: // Fall-through. 881 case Primitive::kPrimInt: // Fall-through. 882 case Primitive::kPrimFloat: 883 val.i = *reinterpret_cast<jint*>(GetParamAddress()); 884 break; 885 case Primitive::kPrimVoid: 886 LOG(FATAL) << "UNREACHABLE"; 887 UNREACHABLE(); 888 } 889 args_->push_back(val); 890} 891 892void BuildQuickArgumentVisitor::FixupReferences() { 893 // Fixup any references which may have changed. 894 for (const auto& pair : references_) { 895 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 896 soa_->Env()->DeleteLocalRef(pair.first); 897 } 898} 899// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method 900// which is responsible for recording callee save registers. We explicitly place into jobjects the 901// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a 902// field within the proxy object, which will box the primitive arguments and deal with error cases. 903extern "C" uint64_t artQuickProxyInvokeHandler( 904 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) 905 REQUIRES_SHARED(Locks::mutator_lock_) { 906 DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod(); 907 DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod(); 908 // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 909 const char* old_cause = 910 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); 911 // Register the top of the managed stack, making stack crawlable. 912 DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod(); 913 self->VerifyStack(); 914 // Start new JNI local reference state. 915 JNIEnvExt* env = self->GetJniEnv(); 916 ScopedObjectAccessUnchecked soa(env); 917 ScopedJniEnvLocalRefState env_state(env); 918 // Create local ref. copies of proxy method and the receiver. 919 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); 920 921 // Placing arguments into args vector and remove the receiver. 922 ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); 923 CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " " 924 << non_proxy_method->PrettyMethod(); 925 std::vector<jvalue> args; 926 uint32_t shorty_len = 0; 927 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 928 BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); 929 930 local_ref_visitor.VisitArguments(); 931 DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod(); 932 args.erase(args.begin()); 933 934 // Convert proxy method into expected interface method. 935 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize); 936 DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod(); 937 DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod(); 938 self->EndAssertNoThreadSuspension(old_cause); 939 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 940 DCHECK(!Runtime::Current()->IsActiveTransaction()); 941 jobject interface_method_jobj = soa.AddLocalReference<jobject>( 942 mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), 943 interface_method)); 944 945 // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code 946 // that performs allocations. 947 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); 948 // Restore references which might have moved. 949 local_ref_visitor.FixupReferences(); 950 return result.GetJ(); 951} 952 953// Read object references held in arguments from quick frames and place in a JNI local references, 954// so they don't get garbage collected. 955class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { 956 public: 957 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 958 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : 959 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} 960 961 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 962 963 void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_); 964 965 private: 966 ScopedObjectAccessUnchecked* const soa_; 967 // References which we must update when exiting in case the GC moved the objects. 968 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_; 969 970 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor); 971}; 972 973void RememberForGcArgumentVisitor::Visit() { 974 if (IsParamAReference()) { 975 StackReference<mirror::Object>* stack_ref = 976 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 977 jobject reference = 978 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 979 references_.push_back(std::make_pair(reference, stack_ref)); 980 } 981} 982 983void RememberForGcArgumentVisitor::FixupReferences() { 984 // Fixup any references which may have changed. 985 for (const auto& pair : references_) { 986 pair.second->Assign(soa_->Decode<mirror::Object>(pair.first)); 987 soa_->Env()->DeleteLocalRef(pair.first); 988 } 989} 990 991extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, 992 mirror::Object* this_object, 993 Thread* self, 994 ArtMethod** sp) 995 REQUIRES_SHARED(Locks::mutator_lock_) { 996 const void* result; 997 // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip 998 // that part. 999 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1000 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1001 if (instrumentation->IsDeoptimized(method)) { 1002 result = GetQuickToInterpreterBridge(); 1003 } else { 1004 result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize); 1005 DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result)); 1006 } 1007 1008 bool interpreter_entry = (result == GetQuickToInterpreterBridge()); 1009 bool is_static = method->IsStatic(); 1010 uint32_t shorty_len; 1011 const char* shorty = 1012 method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len); 1013 1014 ScopedObjectAccessUnchecked soa(self); 1015 RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa); 1016 visitor.VisitArguments(); 1017 1018 instrumentation->PushInstrumentationStackFrame(self, 1019 is_static ? nullptr : this_object, 1020 method, 1021 QuickArgumentVisitor::GetCallingPc(sp), 1022 interpreter_entry); 1023 1024 visitor.FixupReferences(); 1025 if (UNLIKELY(self->IsExceptionPending())) { 1026 return nullptr; 1027 } 1028 CHECK(result != nullptr) << method->PrettyMethod(); 1029 return result; 1030} 1031 1032extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, 1033 ArtMethod** sp, 1034 uint64_t* gpr_result, 1035 uint64_t* fpr_result) 1036 REQUIRES_SHARED(Locks::mutator_lock_) { 1037 DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current())); 1038 CHECK(gpr_result != nullptr); 1039 CHECK(fpr_result != nullptr); 1040 // Instrumentation exit stub must not be entered with a pending exception. 1041 CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception " 1042 << self->GetException()->Dump(); 1043 // Compute address of return PC and sanity check that it currently holds 0. 1044 size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly); 1045 uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + 1046 return_pc_offset); 1047 CHECK_EQ(*return_pc, 0U); 1048 1049 // Pop the frame filling in the return pc. The low half of the return value is 0 when 1050 // deoptimization shouldn't be performed with the high-half having the return address. When 1051 // deoptimization should be performed the low half is zero and the high-half the address of the 1052 // deoptimization entry point. 1053 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 1054 TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame( 1055 self, return_pc, gpr_result, fpr_result); 1056 if (self->IsExceptionPending()) { 1057 return GetTwoWordFailureValue(); 1058 } 1059 return return_or_deoptimize_pc; 1060} 1061 1062// Lazily resolve a method for quick. Called by stub code. 1063extern "C" const void* artQuickResolutionTrampoline( 1064 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) 1065 REQUIRES_SHARED(Locks::mutator_lock_) { 1066 // The resolution trampoline stashes the resolved method into the callee-save frame to transport 1067 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely 1068 // does not have the same stack layout as the callee-save method). 1069 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 1070 // Start new JNI local reference state 1071 JNIEnvExt* env = self->GetJniEnv(); 1072 ScopedObjectAccessUnchecked soa(env); 1073 ScopedJniEnvLocalRefState env_state(env); 1074 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 1075 1076 // Compute details about the called method (avoid GCs) 1077 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 1078 InvokeType invoke_type; 1079 MethodReference called_method(nullptr, 0); 1080 const bool called_method_known_on_entry = !called->IsRuntimeMethod(); 1081 ArtMethod* caller = nullptr; 1082 if (!called_method_known_on_entry) { 1083 caller = QuickArgumentVisitor::GetCallingMethod(sp); 1084 called_method.dex_file = caller->GetDexFile(); 1085 1086 InvokeType stack_map_invoke_type; 1087 uint32_t stack_map_dex_method_idx; 1088 const bool found_stack_map = QuickArgumentVisitor::GetInvokeType(sp, 1089 &stack_map_invoke_type, 1090 &stack_map_dex_method_idx); 1091 // For debug builds, we make sure both of the paths are consistent by also looking at the dex 1092 // code. 1093 if (!found_stack_map || kIsDebugBuild) { 1094 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 1095 const DexFile::CodeItem* code; 1096 code = caller->GetCodeItem(); 1097 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 1098 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); 1099 Instruction::Code instr_code = instr->Opcode(); 1100 bool is_range; 1101 switch (instr_code) { 1102 case Instruction::INVOKE_DIRECT: 1103 invoke_type = kDirect; 1104 is_range = false; 1105 break; 1106 case Instruction::INVOKE_DIRECT_RANGE: 1107 invoke_type = kDirect; 1108 is_range = true; 1109 break; 1110 case Instruction::INVOKE_STATIC: 1111 invoke_type = kStatic; 1112 is_range = false; 1113 break; 1114 case Instruction::INVOKE_STATIC_RANGE: 1115 invoke_type = kStatic; 1116 is_range = true; 1117 break; 1118 case Instruction::INVOKE_SUPER: 1119 invoke_type = kSuper; 1120 is_range = false; 1121 break; 1122 case Instruction::INVOKE_SUPER_RANGE: 1123 invoke_type = kSuper; 1124 is_range = true; 1125 break; 1126 case Instruction::INVOKE_VIRTUAL: 1127 invoke_type = kVirtual; 1128 is_range = false; 1129 break; 1130 case Instruction::INVOKE_VIRTUAL_RANGE: 1131 invoke_type = kVirtual; 1132 is_range = true; 1133 break; 1134 case Instruction::INVOKE_INTERFACE: 1135 invoke_type = kInterface; 1136 is_range = false; 1137 break; 1138 case Instruction::INVOKE_INTERFACE_RANGE: 1139 invoke_type = kInterface; 1140 is_range = true; 1141 break; 1142 default: 1143 LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr); 1144 UNREACHABLE(); 1145 } 1146 called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); 1147 // Check that the invoke matches what we expected, note that this path only happens for debug 1148 // builds. 1149 if (found_stack_map) { 1150 DCHECK_EQ(stack_map_invoke_type, invoke_type); 1151 if (invoke_type != kSuper) { 1152 // Super may be sharpened. 1153 DCHECK_EQ(stack_map_dex_method_idx, called_method.dex_method_index) 1154 << called_method.dex_file->PrettyMethod(stack_map_dex_method_idx) << " " 1155 << called_method.dex_file->PrettyMethod(called_method.dex_method_index); 1156 } 1157 } else { 1158 VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " " 1159 << called_method.dex_method_index; 1160 } 1161 } else { 1162 invoke_type = stack_map_invoke_type; 1163 called_method.dex_method_index = stack_map_dex_method_idx; 1164 } 1165 } else { 1166 invoke_type = kStatic; 1167 called_method.dex_file = called->GetDexFile(); 1168 called_method.dex_method_index = called->GetDexMethodIndex(); 1169 } 1170 uint32_t shorty_len; 1171 const char* shorty = 1172 called_method.dex_file->GetMethodShorty( 1173 called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len); 1174 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 1175 visitor.VisitArguments(); 1176 self->EndAssertNoThreadSuspension(old_cause); 1177 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 1178 // Resolve method filling in dex cache. 1179 if (!called_method_known_on_entry) { 1180 StackHandleScope<1> hs(self); 1181 mirror::Object* dummy = nullptr; 1182 HandleWrapper<mirror::Object> h_receiver( 1183 hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy)); 1184 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1185 called = linker->ResolveMethod<ClassLinker::kForceICCECheck>( 1186 self, called_method.dex_method_index, caller, invoke_type); 1187 1188 // Update .bss entry in oat file if any. 1189 if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) { 1190 const MethodBssMapping* mapping = 1191 called_method.dex_file->GetOatDexFile()->GetMethodBssMapping(); 1192 if (mapping != nullptr) { 1193 auto pp = std::partition_point( 1194 mapping->begin(), 1195 mapping->end(), 1196 [called_method](const MethodBssMappingEntry& entry) { 1197 return entry.method_index < called_method.dex_method_index; 1198 }); 1199 if (pp != mapping->end() && pp->CoversIndex(called_method.dex_method_index)) { 1200 size_t bss_offset = pp->GetBssOffset(called_method.dex_method_index, 1201 static_cast<size_t>(kRuntimePointerSize)); 1202 DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize)); 1203 const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile(); 1204 ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>( 1205 oat_file->BssBegin() + bss_offset)); 1206 DCHECK_GE(method_entry, oat_file->GetBssMethods().data()); 1207 DCHECK_LT(method_entry, 1208 oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size()); 1209 *method_entry = called; 1210 } 1211 } 1212 } 1213 } 1214 const void* code = nullptr; 1215 if (LIKELY(!self->IsExceptionPending())) { 1216 // Incompatible class change should have been handled in resolve method. 1217 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 1218 << called->PrettyMethod() << " " << invoke_type; 1219 if (virtual_or_interface || invoke_type == kSuper) { 1220 // Refine called method based on receiver for kVirtual/kInterface, and 1221 // caller for kSuper. 1222 ArtMethod* orig_called = called; 1223 if (invoke_type == kVirtual) { 1224 CHECK(receiver != nullptr) << invoke_type; 1225 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize); 1226 } else if (invoke_type == kInterface) { 1227 CHECK(receiver != nullptr) << invoke_type; 1228 called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize); 1229 } else { 1230 DCHECK_EQ(invoke_type, kSuper); 1231 CHECK(caller != nullptr) << invoke_type; 1232 StackHandleScope<2> hs(self); 1233 Handle<mirror::DexCache> dex_cache( 1234 hs.NewHandle(caller->GetDeclaringClass()->GetDexCache())); 1235 Handle<mirror::ClassLoader> class_loader( 1236 hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader())); 1237 // TODO Maybe put this into a mirror::Class function. 1238 mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod( 1239 called_method.dex_method_index, dex_cache, class_loader); 1240 if (ref_class->IsInterface()) { 1241 called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize); 1242 } else { 1243 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( 1244 called->GetMethodIndex(), kRuntimePointerSize); 1245 } 1246 } 1247 1248 CHECK(called != nullptr) << orig_called->PrettyMethod() << " " 1249 << mirror::Object::PrettyTypeOf(receiver) << " " 1250 << invoke_type << " " << orig_called->GetVtableIndex(); 1251 1252 // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index 1253 // of the sharpened method avoiding dirtying the dex cache if possible. 1254 // Note, called_method.dex_method_index references the dex method before the 1255 // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares 1256 // about the name and signature. 1257 uint32_t update_dex_cache_method_index = called->GetDexMethodIndex(); 1258 if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) { 1259 // Calling from one dex file to another, need to compute the method index appropriate to 1260 // the caller's dex file. Since we get here only if the original called was a runtime 1261 // method, we've got the correct dex_file and a dex_method_idx from above. 1262 DCHECK(!called_method_known_on_entry); 1263 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1264 const DexFile* caller_dex_file = called_method.dex_file; 1265 uint32_t caller_method_name_and_sig_index = called_method.dex_method_index; 1266 update_dex_cache_method_index = 1267 called->FindDexMethodIndexInOtherDexFile(*caller_dex_file, 1268 caller_method_name_and_sig_index); 1269 } 1270 if ((update_dex_cache_method_index != DexFile::kDexNoIndex) && 1271 (caller->GetDexCacheResolvedMethod( 1272 update_dex_cache_method_index, kRuntimePointerSize) != called)) { 1273 caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, 1274 called, 1275 kRuntimePointerSize); 1276 } 1277 } else if (invoke_type == kStatic) { 1278 const auto called_dex_method_idx = called->GetDexMethodIndex(); 1279 // For static invokes, we may dispatch to the static method in the superclass but resolve 1280 // using the subclass. To prevent getting slow paths on each invoke, we force set the 1281 // resolved method for the super class dex method index if we are in the same dex file. 1282 // b/19175856 1283 if (called->GetDexFile() == called_method.dex_file && 1284 called_method.dex_method_index != called_dex_method_idx) { 1285 called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, 1286 called, 1287 kRuntimePointerSize); 1288 } 1289 } 1290 1291 // Ensure that the called method's class is initialized. 1292 StackHandleScope<1> hs(soa.Self()); 1293 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 1294 linker->EnsureInitialized(soa.Self(), called_class, true, true); 1295 if (LIKELY(called_class->IsInitialized())) { 1296 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1297 // If we are single-stepping or the called method is deoptimized (by a 1298 // breakpoint, for example), then we have to execute the called method 1299 // with the interpreter. 1300 code = GetQuickToInterpreterBridge(); 1301 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) { 1302 // If the caller is deoptimized (by a breakpoint, for example), we have to 1303 // continue its execution with interpreter when returning from the called 1304 // method. Because we do not want to execute the called method with the 1305 // interpreter, we wrap its execution into the instrumentation stubs. 1306 // When the called method returns, it will execute the instrumentation 1307 // exit hook that will determine the need of the interpreter with a call 1308 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if 1309 // it is needed. 1310 code = GetQuickInstrumentationEntryPoint(); 1311 } else { 1312 code = called->GetEntryPointFromQuickCompiledCode(); 1313 } 1314 } else if (called_class->IsInitializing()) { 1315 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1316 // If we are single-stepping or the called method is deoptimized (by a 1317 // breakpoint, for example), then we have to execute the called method 1318 // with the interpreter. 1319 code = GetQuickToInterpreterBridge(); 1320 } else if (invoke_type == kStatic) { 1321 // Class is still initializing, go to oat and grab code (trampoline must be left in place 1322 // until class is initialized to stop races between threads). 1323 code = linker->GetQuickOatCodeFor(called); 1324 } else { 1325 // No trampoline for non-static methods. 1326 code = called->GetEntryPointFromQuickCompiledCode(); 1327 } 1328 } else { 1329 DCHECK(called_class->IsErroneous()); 1330 } 1331 } 1332 CHECK_EQ(code == nullptr, self->IsExceptionPending()); 1333 // Fixup any locally saved objects may have moved during a GC. 1334 visitor.FixupReferences(); 1335 // Place called method in callee-save frame to be placed as first argument to quick method. 1336 *sp = called; 1337 1338 return code; 1339} 1340 1341/* 1342 * This class uses a couple of observations to unite the different calling conventions through 1343 * a few constants. 1344 * 1345 * 1) Number of registers used for passing is normally even, so counting down has no penalty for 1346 * possible alignment. 1347 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point 1348 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote 1349 * when we have to split things 1350 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats 1351 * and we can use Int handling directly. 1352 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code 1353 * necessary when widening. Also, widening of Ints will take place implicitly, and the 1354 * extension should be compatible with Aarch64, which mandates copying the available bits 1355 * into LSB and leaving the rest unspecified. 1356 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on 1357 * the stack. 1358 * 6) There is only little endian. 1359 * 1360 * 1361 * Actual work is supposed to be done in a delegate of the template type. The interface is as 1362 * follows: 1363 * 1364 * void PushGpr(uintptr_t): Add a value for the next GPR 1365 * 1366 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need 1367 * padding, that is, think the architecture is 32b and aligns 64b. 1368 * 1369 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to 1370 * split this if necessary. The current state will have aligned, if 1371 * necessary. 1372 * 1373 * void PushStack(uintptr_t): Push a value to the stack. 1374 * 1375 * uintptr_t PushHandleScope(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr, 1376 * as this might be important for null initialization. 1377 * Must return the jobject, that is, the reference to the 1378 * entry in the HandleScope (nullptr if necessary). 1379 * 1380 */ 1381template<class T> class BuildNativeCallFrameStateMachine { 1382 public: 1383#if defined(__arm__) 1384 // TODO: These are all dummy values! 1385 static constexpr bool kNativeSoftFloatAbi = true; 1386 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3 1387 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1388 1389 static constexpr size_t kRegistersNeededForLong = 2; 1390 static constexpr size_t kRegistersNeededForDouble = 2; 1391 static constexpr bool kMultiRegistersAligned = true; 1392 static constexpr bool kMultiFPRegistersWidened = false; 1393 static constexpr bool kMultiGPRegistersWidened = false; 1394 static constexpr bool kAlignLongOnStack = true; 1395 static constexpr bool kAlignDoubleOnStack = true; 1396#elif defined(__aarch64__) 1397 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 1398 static constexpr size_t kNumNativeGprArgs = 8; // 6 arguments passed in GPRs. 1399 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs. 1400 1401 static constexpr size_t kRegistersNeededForLong = 1; 1402 static constexpr size_t kRegistersNeededForDouble = 1; 1403 static constexpr bool kMultiRegistersAligned = false; 1404 static constexpr bool kMultiFPRegistersWidened = false; 1405 static constexpr bool kMultiGPRegistersWidened = false; 1406 static constexpr bool kAlignLongOnStack = false; 1407 static constexpr bool kAlignDoubleOnStack = false; 1408#elif defined(__mips__) && !defined(__LP64__) 1409 static constexpr bool kNativeSoftFloatAbi = true; // This is a hard float ABI. 1410 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs. 1411 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1412 1413 static constexpr size_t kRegistersNeededForLong = 2; 1414 static constexpr size_t kRegistersNeededForDouble = 2; 1415 static constexpr bool kMultiRegistersAligned = true; 1416 static constexpr bool kMultiFPRegistersWidened = true; 1417 static constexpr bool kMultiGPRegistersWidened = false; 1418 static constexpr bool kAlignLongOnStack = true; 1419 static constexpr bool kAlignDoubleOnStack = true; 1420#elif defined(__mips__) && defined(__LP64__) 1421 // Let the code prepare GPRs only and we will load the FPRs with same data. 1422 static constexpr bool kNativeSoftFloatAbi = true; 1423 static constexpr size_t kNumNativeGprArgs = 8; 1424 static constexpr size_t kNumNativeFprArgs = 0; 1425 1426 static constexpr size_t kRegistersNeededForLong = 1; 1427 static constexpr size_t kRegistersNeededForDouble = 1; 1428 static constexpr bool kMultiRegistersAligned = false; 1429 static constexpr bool kMultiFPRegistersWidened = false; 1430 static constexpr bool kMultiGPRegistersWidened = true; 1431 static constexpr bool kAlignLongOnStack = false; 1432 static constexpr bool kAlignDoubleOnStack = false; 1433#elif defined(__i386__) 1434 // TODO: Check these! 1435 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp 1436 static constexpr size_t kNumNativeGprArgs = 0; // 6 arguments passed in GPRs. 1437 static constexpr size_t kNumNativeFprArgs = 0; // 8 arguments passed in FPRs. 1438 1439 static constexpr size_t kRegistersNeededForLong = 2; 1440 static constexpr size_t kRegistersNeededForDouble = 2; 1441 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways 1442 static constexpr bool kMultiFPRegistersWidened = false; 1443 static constexpr bool kMultiGPRegistersWidened = false; 1444 static constexpr bool kAlignLongOnStack = false; 1445 static constexpr bool kAlignDoubleOnStack = false; 1446#elif defined(__x86_64__) 1447 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 1448 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs. 1449 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs. 1450 1451 static constexpr size_t kRegistersNeededForLong = 1; 1452 static constexpr size_t kRegistersNeededForDouble = 1; 1453 static constexpr bool kMultiRegistersAligned = false; 1454 static constexpr bool kMultiFPRegistersWidened = false; 1455 static constexpr bool kMultiGPRegistersWidened = false; 1456 static constexpr bool kAlignLongOnStack = false; 1457 static constexpr bool kAlignDoubleOnStack = false; 1458#else 1459#error "Unsupported architecture" 1460#endif 1461 1462 public: 1463 explicit BuildNativeCallFrameStateMachine(T* delegate) 1464 : gpr_index_(kNumNativeGprArgs), 1465 fpr_index_(kNumNativeFprArgs), 1466 stack_entries_(0), 1467 delegate_(delegate) { 1468 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff 1469 // the next register is even; counting down is just to make the compiler happy... 1470 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); 1471 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); 1472 } 1473 1474 virtual ~BuildNativeCallFrameStateMachine() {} 1475 1476 bool HavePointerGpr() const { 1477 return gpr_index_ > 0; 1478 } 1479 1480 void AdvancePointer(const void* val) { 1481 if (HavePointerGpr()) { 1482 gpr_index_--; 1483 PushGpr(reinterpret_cast<uintptr_t>(val)); 1484 } else { 1485 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b 1486 PushStack(reinterpret_cast<uintptr_t>(val)); 1487 gpr_index_ = 0; 1488 } 1489 } 1490 1491 bool HaveHandleScopeGpr() const { 1492 return gpr_index_ > 0; 1493 } 1494 1495 void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) { 1496 uintptr_t handle = PushHandle(ptr); 1497 if (HaveHandleScopeGpr()) { 1498 gpr_index_--; 1499 PushGpr(handle); 1500 } else { 1501 stack_entries_++; 1502 PushStack(handle); 1503 gpr_index_ = 0; 1504 } 1505 } 1506 1507 bool HaveIntGpr() const { 1508 return gpr_index_ > 0; 1509 } 1510 1511 void AdvanceInt(uint32_t val) { 1512 if (HaveIntGpr()) { 1513 gpr_index_--; 1514 if (kMultiGPRegistersWidened) { 1515 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1516 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1517 } else { 1518 PushGpr(val); 1519 } 1520 } else { 1521 stack_entries_++; 1522 if (kMultiGPRegistersWidened) { 1523 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1524 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1525 } else { 1526 PushStack(val); 1527 } 1528 gpr_index_ = 0; 1529 } 1530 } 1531 1532 bool HaveLongGpr() const { 1533 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0); 1534 } 1535 1536 bool LongGprNeedsPadding() const { 1537 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1538 kAlignLongOnStack && // and when it needs alignment 1539 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1540 } 1541 1542 bool LongStackNeedsPadding() const { 1543 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1544 kAlignLongOnStack && // and when it needs 8B alignment 1545 (stack_entries_ & 1) == 1; // counter is odd 1546 } 1547 1548 void AdvanceLong(uint64_t val) { 1549 if (HaveLongGpr()) { 1550 if (LongGprNeedsPadding()) { 1551 PushGpr(0); 1552 gpr_index_--; 1553 } 1554 if (kRegistersNeededForLong == 1) { 1555 PushGpr(static_cast<uintptr_t>(val)); 1556 } else { 1557 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1558 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1559 } 1560 gpr_index_ -= kRegistersNeededForLong; 1561 } else { 1562 if (LongStackNeedsPadding()) { 1563 PushStack(0); 1564 stack_entries_++; 1565 } 1566 if (kRegistersNeededForLong == 1) { 1567 PushStack(static_cast<uintptr_t>(val)); 1568 stack_entries_++; 1569 } else { 1570 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1571 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1572 stack_entries_ += 2; 1573 } 1574 gpr_index_ = 0; 1575 } 1576 } 1577 1578 bool HaveFloatFpr() const { 1579 return fpr_index_ > 0; 1580 } 1581 1582 void AdvanceFloat(float val) { 1583 if (kNativeSoftFloatAbi) { 1584 AdvanceInt(bit_cast<uint32_t, float>(val)); 1585 } else { 1586 if (HaveFloatFpr()) { 1587 fpr_index_--; 1588 if (kRegistersNeededForDouble == 1) { 1589 if (kMultiFPRegistersWidened) { 1590 PushFpr8(bit_cast<uint64_t, double>(val)); 1591 } else { 1592 // No widening, just use the bits. 1593 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1594 } 1595 } else { 1596 PushFpr4(val); 1597 } 1598 } else { 1599 stack_entries_++; 1600 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1601 // Need to widen before storing: Note the "double" in the template instantiation. 1602 // Note: We need to jump through those hoops to make the compiler happy. 1603 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1604 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1605 } else { 1606 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1607 } 1608 fpr_index_ = 0; 1609 } 1610 } 1611 } 1612 1613 bool HaveDoubleFpr() const { 1614 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0); 1615 } 1616 1617 bool DoubleFprNeedsPadding() const { 1618 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1619 kAlignDoubleOnStack && // and when it needs alignment 1620 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1621 } 1622 1623 bool DoubleStackNeedsPadding() const { 1624 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1625 kAlignDoubleOnStack && // and when it needs 8B alignment 1626 (stack_entries_ & 1) == 1; // counter is odd 1627 } 1628 1629 void AdvanceDouble(uint64_t val) { 1630 if (kNativeSoftFloatAbi) { 1631 AdvanceLong(val); 1632 } else { 1633 if (HaveDoubleFpr()) { 1634 if (DoubleFprNeedsPadding()) { 1635 PushFpr4(0); 1636 fpr_index_--; 1637 } 1638 PushFpr8(val); 1639 fpr_index_ -= kRegistersNeededForDouble; 1640 } else { 1641 if (DoubleStackNeedsPadding()) { 1642 PushStack(0); 1643 stack_entries_++; 1644 } 1645 if (kRegistersNeededForDouble == 1) { 1646 PushStack(static_cast<uintptr_t>(val)); 1647 stack_entries_++; 1648 } else { 1649 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1650 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1651 stack_entries_ += 2; 1652 } 1653 fpr_index_ = 0; 1654 } 1655 } 1656 } 1657 1658 uint32_t GetStackEntries() const { 1659 return stack_entries_; 1660 } 1661 1662 uint32_t GetNumberOfUsedGprs() const { 1663 return kNumNativeGprArgs - gpr_index_; 1664 } 1665 1666 uint32_t GetNumberOfUsedFprs() const { 1667 return kNumNativeFprArgs - fpr_index_; 1668 } 1669 1670 private: 1671 void PushGpr(uintptr_t val) { 1672 delegate_->PushGpr(val); 1673 } 1674 void PushFpr4(float val) { 1675 delegate_->PushFpr4(val); 1676 } 1677 void PushFpr8(uint64_t val) { 1678 delegate_->PushFpr8(val); 1679 } 1680 void PushStack(uintptr_t val) { 1681 delegate_->PushStack(val); 1682 } 1683 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { 1684 return delegate_->PushHandle(ref); 1685 } 1686 1687 uint32_t gpr_index_; // Number of free GPRs 1688 uint32_t fpr_index_; // Number of free FPRs 1689 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1690 // extended 1691 T* const delegate_; // What Push implementation gets called 1692}; 1693 1694// Computes the sizes of register stacks and call stack area. Handling of references can be extended 1695// in subclasses. 1696// 1697// To handle native pointers, use "L" in the shorty for an object reference, which simulates 1698// them with handles. 1699class ComputeNativeCallFrameSize { 1700 public: 1701 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1702 1703 virtual ~ComputeNativeCallFrameSize() {} 1704 1705 uint32_t GetStackSize() const { 1706 return num_stack_entries_ * sizeof(uintptr_t); 1707 } 1708 1709 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1710 sp8 -= GetStackSize(); 1711 // Align by kStackAlignment. 1712 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1713 return sp8; 1714 } 1715 1716 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) 1717 const { 1718 // Assumption is OK right now, as we have soft-float arm 1719 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs; 1720 sp8 -= fregs * sizeof(uintptr_t); 1721 *start_fpr = reinterpret_cast<uint32_t*>(sp8); 1722 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs; 1723 sp8 -= iregs * sizeof(uintptr_t); 1724 *start_gpr = reinterpret_cast<uintptr_t*>(sp8); 1725 return sp8; 1726 } 1727 1728 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr, 1729 uint32_t** start_fpr) const { 1730 // Native call stack. 1731 sp8 = LayoutCallStack(sp8); 1732 *start_stack = reinterpret_cast<uintptr_t*>(sp8); 1733 1734 // Put fprs and gprs below. 1735 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr); 1736 1737 // Return the new bottom. 1738 return sp8; 1739 } 1740 1741 virtual void WalkHeader( 1742 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED) 1743 REQUIRES_SHARED(Locks::mutator_lock_) { 1744 } 1745 1746 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) { 1747 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); 1748 1749 WalkHeader(&sm); 1750 1751 for (uint32_t i = 1; i < shorty_len; ++i) { 1752 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]); 1753 switch (cur_type_) { 1754 case Primitive::kPrimNot: 1755 // TODO: fix abuse of mirror types. 1756 sm.AdvanceHandleScope( 1757 reinterpret_cast<mirror::Object*>(0x12345678)); 1758 break; 1759 1760 case Primitive::kPrimBoolean: 1761 case Primitive::kPrimByte: 1762 case Primitive::kPrimChar: 1763 case Primitive::kPrimShort: 1764 case Primitive::kPrimInt: 1765 sm.AdvanceInt(0); 1766 break; 1767 case Primitive::kPrimFloat: 1768 sm.AdvanceFloat(0); 1769 break; 1770 case Primitive::kPrimDouble: 1771 sm.AdvanceDouble(0); 1772 break; 1773 case Primitive::kPrimLong: 1774 sm.AdvanceLong(0); 1775 break; 1776 default: 1777 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty; 1778 UNREACHABLE(); 1779 } 1780 } 1781 1782 num_stack_entries_ = sm.GetStackEntries(); 1783 } 1784 1785 void PushGpr(uintptr_t /* val */) { 1786 // not optimizing registers, yet 1787 } 1788 1789 void PushFpr4(float /* val */) { 1790 // not optimizing registers, yet 1791 } 1792 1793 void PushFpr8(uint64_t /* val */) { 1794 // not optimizing registers, yet 1795 } 1796 1797 void PushStack(uintptr_t /* val */) { 1798 // counting is already done in the superclass 1799 } 1800 1801 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) { 1802 return reinterpret_cast<uintptr_t>(nullptr); 1803 } 1804 1805 protected: 1806 uint32_t num_stack_entries_; 1807}; 1808 1809class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { 1810 public: 1811 explicit ComputeGenericJniFrameSize(bool critical_native) 1812 : num_handle_scope_references_(0), critical_native_(critical_native) {} 1813 1814 // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs 1815 // is at *m = sp. Will update to point to the bottom of the save frame. 1816 // 1817 // Note: assumes ComputeAll() has been run before. 1818 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1819 REQUIRES_SHARED(Locks::mutator_lock_) { 1820 ArtMethod* method = **m; 1821 1822 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 1823 1824 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1825 1826 // First, fix up the layout of the callee-save frame. 1827 // We have to squeeze in the HandleScope, and relocate the method pointer. 1828 1829 // "Free" the slot for the method. 1830 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 1831 1832 // Under the callee saves put handle scope and new method stack reference. 1833 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 1834 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 1835 1836 sp8 -= scope_and_method; 1837 // Align by kStackAlignment. 1838 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1839 1840 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 1841 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 1842 num_handle_scope_references_); 1843 1844 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 1845 uint8_t* method_pointer = sp8; 1846 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 1847 *new_method_ref = method; 1848 *m = new_method_ref; 1849 } 1850 1851 // Adds space for the cookie. Note: may leave stack unaligned. 1852 void LayoutCookie(uint8_t** sp) const { 1853 // Reference cookie and padding 1854 *sp -= 8; 1855 } 1856 1857 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 1858 // Returns the new bottom. Note: this may be unaligned. 1859 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1860 REQUIRES_SHARED(Locks::mutator_lock_) { 1861 // First, fix up the layout of the callee-save frame. 1862 // We have to squeeze in the HandleScope, and relocate the method pointer. 1863 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 1864 1865 // The bottom of the callee-save frame is now where the method is, *m. 1866 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 1867 1868 // Add space for cookie. 1869 LayoutCookie(&sp8); 1870 1871 return sp8; 1872 } 1873 1874 // WARNING: After this, *sp won't be pointing to the method anymore! 1875 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 1876 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 1877 uint32_t** start_fpr) 1878 REQUIRES_SHARED(Locks::mutator_lock_) { 1879 Walk(shorty, shorty_len); 1880 1881 // JNI part. 1882 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 1883 1884 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 1885 1886 // Return the new bottom. 1887 return sp8; 1888 } 1889 1890 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE; 1891 1892 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 1893 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE 1894 REQUIRES_SHARED(Locks::mutator_lock_); 1895 1896 private: 1897 uint32_t num_handle_scope_references_; 1898 const bool critical_native_; 1899}; 1900 1901uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) { 1902 num_handle_scope_references_++; 1903 return reinterpret_cast<uintptr_t>(nullptr); 1904} 1905 1906void ComputeGenericJniFrameSize::WalkHeader( 1907 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) { 1908 // First 2 parameters are always excluded for @CriticalNative. 1909 if (UNLIKELY(critical_native_)) { 1910 return; 1911 } 1912 1913 // JNIEnv 1914 sm->AdvancePointer(nullptr); 1915 1916 // Class object or this as first argument 1917 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678)); 1918} 1919 1920// Class to push values to three separate regions. Used to fill the native call part. Adheres to 1921// the template requirements of BuildGenericJniFrameStateMachine. 1922class FillNativeCall { 1923 public: 1924 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) : 1925 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {} 1926 1927 virtual ~FillNativeCall() {} 1928 1929 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) { 1930 cur_gpr_reg_ = gpr_regs; 1931 cur_fpr_reg_ = fpr_regs; 1932 cur_stack_arg_ = stack_args; 1933 } 1934 1935 void PushGpr(uintptr_t val) { 1936 *cur_gpr_reg_ = val; 1937 cur_gpr_reg_++; 1938 } 1939 1940 void PushFpr4(float val) { 1941 *cur_fpr_reg_ = val; 1942 cur_fpr_reg_++; 1943 } 1944 1945 void PushFpr8(uint64_t val) { 1946 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_); 1947 *tmp = val; 1948 cur_fpr_reg_ += 2; 1949 } 1950 1951 void PushStack(uintptr_t val) { 1952 *cur_stack_arg_ = val; 1953 cur_stack_arg_++; 1954 } 1955 1956 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) { 1957 LOG(FATAL) << "(Non-JNI) Native call does not use handles."; 1958 UNREACHABLE(); 1959 } 1960 1961 private: 1962 uintptr_t* cur_gpr_reg_; 1963 uint32_t* cur_fpr_reg_; 1964 uintptr_t* cur_stack_arg_; 1965}; 1966 1967// Visits arguments on the stack placing them into a region lower down the stack for the benefit 1968// of transitioning into native code. 1969class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { 1970 public: 1971 BuildGenericJniFrameVisitor(Thread* self, 1972 bool is_static, 1973 bool critical_native, 1974 const char* shorty, 1975 uint32_t shorty_len, 1976 ArtMethod*** sp) 1977 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 1978 jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native), 1979 sm_(&jni_call_) { 1980 ComputeGenericJniFrameSize fsc(critical_native); 1981 uintptr_t* start_gpr_reg; 1982 uint32_t* start_fpr_reg; 1983 uintptr_t* start_stack_arg; 1984 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 1985 &handle_scope_, 1986 &start_stack_arg, 1987 &start_gpr_reg, &start_fpr_reg); 1988 1989 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 1990 1991 // First 2 parameters are always excluded for CriticalNative methods. 1992 if (LIKELY(!critical_native)) { 1993 // jni environment is always first argument 1994 sm_.AdvancePointer(self->GetJniEnv()); 1995 1996 if (is_static) { 1997 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); 1998 } // else "this" reference is already handled by QuickArgumentVisitor. 1999 } 2000 } 2001 2002 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 2003 2004 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); 2005 2006 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 2007 return handle_scope_->GetHandle(0).GetReference(); 2008 } 2009 2010 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) { 2011 return handle_scope_->GetHandle(0).ToJObject(); 2012 } 2013 2014 void* GetBottomOfUsedArea() const { 2015 return bottom_of_used_area_; 2016 } 2017 2018 private: 2019 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 2020 class FillJniCall FINAL : public FillNativeCall { 2021 public: 2022 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 2023 HandleScope* handle_scope, bool critical_native) 2024 : FillNativeCall(gpr_regs, fpr_regs, stack_args), 2025 handle_scope_(handle_scope), 2026 cur_entry_(0), 2027 critical_native_(critical_native) {} 2028 2029 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); 2030 2031 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 2032 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 2033 handle_scope_ = scope; 2034 cur_entry_ = 0U; 2035 } 2036 2037 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) { 2038 // Initialize padding entries. 2039 size_t expected_slots = handle_scope_->NumberOfReferences(); 2040 while (cur_entry_ < expected_slots) { 2041 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr); 2042 } 2043 2044 if (!critical_native_) { 2045 // Non-critical natives have at least the self class (jclass) or this (jobject). 2046 DCHECK_NE(cur_entry_, 0U); 2047 } 2048 } 2049 2050 bool CriticalNative() const { 2051 return critical_native_; 2052 } 2053 2054 private: 2055 HandleScope* handle_scope_; 2056 size_t cur_entry_; 2057 const bool critical_native_; 2058 }; 2059 2060 HandleScope* handle_scope_; 2061 FillJniCall jni_call_; 2062 void* bottom_of_used_area_; 2063 2064 BuildNativeCallFrameStateMachine<FillJniCall> sm_; 2065 2066 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor); 2067}; 2068 2069uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) { 2070 uintptr_t tmp; 2071 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_); 2072 h.Assign(ref); 2073 tmp = reinterpret_cast<uintptr_t>(h.ToJObject()); 2074 cur_entry_++; 2075 return tmp; 2076} 2077 2078void BuildGenericJniFrameVisitor::Visit() { 2079 Primitive::Type type = GetParamPrimitiveType(); 2080 switch (type) { 2081 case Primitive::kPrimLong: { 2082 jlong long_arg; 2083 if (IsSplitLongOrDouble()) { 2084 long_arg = ReadSplitLongParam(); 2085 } else { 2086 long_arg = *reinterpret_cast<jlong*>(GetParamAddress()); 2087 } 2088 sm_.AdvanceLong(long_arg); 2089 break; 2090 } 2091 case Primitive::kPrimDouble: { 2092 uint64_t double_arg; 2093 if (IsSplitLongOrDouble()) { 2094 // Read into union so that we don't case to a double. 2095 double_arg = ReadSplitLongParam(); 2096 } else { 2097 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress()); 2098 } 2099 sm_.AdvanceDouble(double_arg); 2100 break; 2101 } 2102 case Primitive::kPrimNot: { 2103 StackReference<mirror::Object>* stack_ref = 2104 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 2105 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr()); 2106 break; 2107 } 2108 case Primitive::kPrimFloat: 2109 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress())); 2110 break; 2111 case Primitive::kPrimBoolean: // Fall-through. 2112 case Primitive::kPrimByte: // Fall-through. 2113 case Primitive::kPrimChar: // Fall-through. 2114 case Primitive::kPrimShort: // Fall-through. 2115 case Primitive::kPrimInt: // Fall-through. 2116 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress())); 2117 break; 2118 case Primitive::kPrimVoid: 2119 LOG(FATAL) << "UNREACHABLE"; 2120 UNREACHABLE(); 2121 } 2122} 2123 2124void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) { 2125 // Clear out rest of the scope. 2126 jni_call_.ResetRemainingScopeSlots(); 2127 if (!jni_call_.CriticalNative()) { 2128 // Install HandleScope. 2129 self->PushHandleScope(handle_scope_); 2130 } 2131} 2132 2133#if defined(__arm__) || defined(__aarch64__) 2134extern "C" const void* artFindNativeMethod(); 2135#else 2136extern "C" const void* artFindNativeMethod(Thread* self); 2137#endif 2138 2139static uint64_t artQuickGenericJniEndJNIRef(Thread* self, 2140 uint32_t cookie, 2141 bool fast_native ATTRIBUTE_UNUSED, 2142 jobject l, 2143 jobject lock) { 2144 // TODO: add entrypoints for @FastNative returning objects. 2145 if (lock != nullptr) { 2146 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self)); 2147 } else { 2148 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self)); 2149 } 2150} 2151 2152static void artQuickGenericJniEndJNINonRef(Thread* self, 2153 uint32_t cookie, 2154 bool fast_native, 2155 jobject lock) { 2156 if (lock != nullptr) { 2157 JniMethodEndSynchronized(cookie, lock, self); 2158 // Ignore "fast_native" here because synchronized functions aren't very fast. 2159 } else { 2160 if (UNLIKELY(fast_native)) { 2161 JniMethodFastEnd(cookie, self); 2162 } else { 2163 JniMethodEnd(cookie, self); 2164 } 2165 } 2166} 2167 2168/* 2169 * Initializes an alloca region assumed to be directly below sp for a native call: 2170 * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers. 2171 * The final element on the stack is a pointer to the native code. 2172 * 2173 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it. 2174 * We need to fix this, as the handle scope needs to go into the callee-save frame. 2175 * 2176 * The return of this function denotes: 2177 * 1) How many bytes of the alloca can be released, if the value is non-negative. 2178 * 2) An error, if the value is negative. 2179 */ 2180extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) 2181 REQUIRES_SHARED(Locks::mutator_lock_) { 2182 ArtMethod* called = *sp; 2183 DCHECK(called->IsNative()) << called->PrettyMethod(true); 2184 // Fix up a callee-save frame at the bottom of the stack (at `*sp`, 2185 // above the alloca region) while we check for optimization 2186 // annotations, thus allowing stack walking until the completion of 2187 // the JNI frame creation. 2188 // 2189 // Note however that the Generic JNI trampoline does not expect 2190 // exception being thrown at that stage. 2191 *sp = Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs); 2192 self->SetTopOfStack(sp); 2193 uint32_t shorty_len = 0; 2194 const char* shorty = called->GetShorty(&shorty_len); 2195 bool critical_native = called->IsAnnotatedWithCriticalNative(); 2196 // ArtMethod::IsAnnotatedWithCriticalNative should not throw 2197 // an exception; clear it if it happened anyway. 2198 // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()). 2199 if (self->IsExceptionPending()) { 2200 self->ClearException(); 2201 } 2202 bool fast_native = called->IsAnnotatedWithFastNative(); 2203 // ArtMethod::IsAnnotatedWithFastNative should not throw 2204 // an exception; clear it if it happened anyway. 2205 // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()). 2206 if (self->IsExceptionPending()) { 2207 self->ClearException(); 2208 } 2209 bool normal_native = !critical_native && !fast_native; 2210 // Restore the initial ArtMethod pointer at `*sp`. 2211 *sp = called; 2212 2213 // Run the visitor and update sp. 2214 BuildGenericJniFrameVisitor visitor(self, 2215 called->IsStatic(), 2216 critical_native, 2217 shorty, 2218 shorty_len, 2219 &sp); 2220 { 2221 ScopedAssertNoThreadSuspension sants(__FUNCTION__); 2222 visitor.VisitArguments(); 2223 // FinalizeHandleScope pushes the handle scope on the thread. 2224 visitor.FinalizeHandleScope(self); 2225 } 2226 2227 // Fix up managed-stack things in Thread. 2228 self->SetTopOfStack(sp); 2229 2230 self->VerifyStack(); 2231 2232 uint32_t cookie; 2233 uint32_t* sp32; 2234 // Skip calling JniMethodStart for @CriticalNative. 2235 if (LIKELY(!critical_native)) { 2236 // Start JNI, save the cookie. 2237 if (called->IsSynchronized()) { 2238 DCHECK(normal_native) << " @FastNative and synchronize is not supported"; 2239 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 2240 if (self->IsExceptionPending()) { 2241 self->PopHandleScope(); 2242 // A negative value denotes an error. 2243 return GetTwoWordFailureValue(); 2244 } 2245 } else { 2246 if (fast_native) { 2247 cookie = JniMethodFastStart(self); 2248 } else { 2249 DCHECK(normal_native); 2250 cookie = JniMethodStart(self); 2251 } 2252 } 2253 sp32 = reinterpret_cast<uint32_t*>(sp); 2254 *(sp32 - 1) = cookie; 2255 } 2256 2257 // Retrieve the stored native code. 2258 void const* nativeCode = called->GetEntryPointFromJni(); 2259 2260 // There are two cases for the content of nativeCode: 2261 // 1) Pointer to the native function. 2262 // 2) Pointer to the trampoline for native code binding. 2263 // In the second case, we need to execute the binding and continue with the actual native function 2264 // pointer. 2265 DCHECK(nativeCode != nullptr); 2266 if (nativeCode == GetJniDlsymLookupStub()) { 2267#if defined(__arm__) || defined(__aarch64__) 2268 nativeCode = artFindNativeMethod(); 2269#else 2270 nativeCode = artFindNativeMethod(self); 2271#endif 2272 2273 if (nativeCode == nullptr) { 2274 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 2275 2276 // @CriticalNative calls do not need to call back into JniMethodEnd. 2277 if (LIKELY(!critical_native)) { 2278 // End JNI, as the assembly will move to deliver the exception. 2279 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 2280 if (shorty[0] == 'L') { 2281 artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock); 2282 } else { 2283 artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock); 2284 } 2285 } 2286 2287 return GetTwoWordFailureValue(); 2288 } 2289 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 2290 } 2291 2292#if defined(__mips__) && !defined(__LP64__) 2293 // On MIPS32 if the first two arguments are floating-point, we need to know their types 2294 // so that art_quick_generic_jni_trampoline can correctly extract them from the stack 2295 // and load into floating-point registers. 2296 // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU 2297 // view): 2298 // (1) 2299 // | DOUBLE | DOUBLE | other args, if any 2300 // | F12 | F13 | F14 | F15 | 2301 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2302 // (2) 2303 // | DOUBLE | FLOAT | (PAD) | other args, if any 2304 // | F12 | F13 | F14 | | 2305 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2306 // (3) 2307 // | FLOAT | (PAD) | DOUBLE | other args, if any 2308 // | F12 | | F14 | F15 | 2309 // | SP+0 | SP+4 | SP+8 | SP+12 | SP+16 2310 // (4) 2311 // | FLOAT | FLOAT | other args, if any 2312 // | F12 | F14 | 2313 // | SP+0 | SP+4 | SP+8 2314 // As you can see, only the last case (4) is special. In all others we can just 2315 // load F12/F13 and F14/F15 in the same manner. 2316 // Set bit 0 of the native code address to 1 in this case (valid code addresses 2317 // are always a multiple of 4 on MIPS32, so we have 2 spare bits available). 2318 if (nativeCode != nullptr && 2319 shorty != nullptr && 2320 shorty_len >= 3 && 2321 shorty[1] == 'F' && 2322 shorty[2] == 'F') { 2323 nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1); 2324 } 2325#endif 2326 2327 // Return native code addr(lo) and bottom of alloca address(hi). 2328 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 2329 reinterpret_cast<uintptr_t>(nativeCode)); 2330} 2331 2332// Defined in quick_jni_entrypoints.cc. 2333extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie, 2334 jvalue result, uint64_t result_f, ArtMethod* called, 2335 HandleScope* handle_scope); 2336/* 2337 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 2338 * unlocking. 2339 */ 2340extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, 2341 jvalue result, 2342 uint64_t result_f) { 2343 // We're here just back from a native call. We don't have the shared mutator lock at this point 2344 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing 2345 // anything that requires a mutator lock before that would cause problems as GC may have the 2346 // exclusive mutator lock and may be moving objects, etc. 2347 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); 2348 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 2349 ArtMethod* called = *sp; 2350 uint32_t cookie = *(sp32 - 1); 2351 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp)); 2352 return GenericJniMethodEnd(self, cookie, result, result_f, called, table); 2353} 2354 2355// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 2356// for the method pointer. 2357// 2358// It is valid to use this, as at the usage points here (returns from C functions) we are assuming 2359// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations). 2360 2361template<InvokeType type, bool access_check> 2362static TwoWordReturn artInvokeCommon(uint32_t method_idx, 2363 ObjPtr<mirror::Object> this_object, 2364 Thread* self, 2365 ArtMethod** sp) { 2366 ScopedQuickEntrypointChecks sqec(self); 2367 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2368 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2369 ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); 2370 if (UNLIKELY(method == nullptr)) { 2371 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); 2372 uint32_t shorty_len; 2373 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 2374 { 2375 // Remember the args in case a GC happens in FindMethodFromCode. 2376 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2377 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 2378 visitor.VisitArguments(); 2379 method = FindMethodFromCode<type, access_check>(method_idx, 2380 &this_object, 2381 caller_method, 2382 self); 2383 visitor.FixupReferences(); 2384 } 2385 2386 if (UNLIKELY(method == nullptr)) { 2387 CHECK(self->IsExceptionPending()); 2388 return GetTwoWordFailureValue(); // Failure. 2389 } 2390 } 2391 DCHECK(!self->IsExceptionPending()); 2392 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2393 2394 // When we return, the caller will branch to this address, so it had better not be 0! 2395 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2396 << " location: " 2397 << method->GetDexFile()->GetLocation(); 2398 2399 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2400 reinterpret_cast<uintptr_t>(method)); 2401} 2402 2403// Explicit artInvokeCommon template function declarations to please analysis tool. 2404#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ 2405 template REQUIRES_SHARED(Locks::mutator_lock_) \ 2406 TwoWordReturn artInvokeCommon<type, access_check>( \ 2407 uint32_t method_idx, ObjPtr<mirror::Object> his_object, Thread* self, ArtMethod** sp) 2408 2409EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); 2410EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); 2411EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false); 2412EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true); 2413EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false); 2414EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true); 2415EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false); 2416EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true); 2417EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false); 2418EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); 2419#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL 2420 2421// See comments in runtime_support_asm.S 2422extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( 2423 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2424 REQUIRES_SHARED(Locks::mutator_lock_) { 2425 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); 2426} 2427 2428extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( 2429 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2430 REQUIRES_SHARED(Locks::mutator_lock_) { 2431 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); 2432} 2433 2434extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( 2435 uint32_t method_idx, 2436 mirror::Object* this_object ATTRIBUTE_UNUSED, 2437 Thread* self, 2438 ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) { 2439 // For static, this_object is not required and may be random garbage. Don't pass it down so that 2440 // it doesn't cause ObjPtr alignment failure check. 2441 return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp); 2442} 2443 2444extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( 2445 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2446 REQUIRES_SHARED(Locks::mutator_lock_) { 2447 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); 2448} 2449 2450extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( 2451 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2452 REQUIRES_SHARED(Locks::mutator_lock_) { 2453 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); 2454} 2455 2456// Determine target of interface dispatch. The interface method and this object are known non-null. 2457// The interface method is the method returned by the dex cache in the conflict trampoline. 2458extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method, 2459 mirror::Object* raw_this_object, 2460 Thread* self, 2461 ArtMethod** sp) 2462 REQUIRES_SHARED(Locks::mutator_lock_) { 2463 CHECK(interface_method != nullptr); 2464 ObjPtr<mirror::Object> this_object(raw_this_object); 2465 ScopedQuickEntrypointChecks sqec(self); 2466 StackHandleScope<1> hs(self); 2467 Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass())); 2468 2469 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2470 ArtMethod* method = nullptr; 2471 ImTable* imt = cls->GetImt(kRuntimePointerSize); 2472 2473 if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) { 2474 // If the interface method is already resolved, look whether we have a match in the 2475 // ImtConflictTable. 2476 ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method), 2477 kRuntimePointerSize); 2478 if (LIKELY(conflict_method->IsRuntimeMethod())) { 2479 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize); 2480 DCHECK(current_table != nullptr); 2481 method = current_table->Lookup(interface_method, kRuntimePointerSize); 2482 } else { 2483 // It seems we aren't really a conflict method! 2484 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2485 } 2486 if (method != nullptr) { 2487 return GetTwoWordSuccessValue( 2488 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()), 2489 reinterpret_cast<uintptr_t>(method)); 2490 } 2491 2492 // No match, use the IfTable. 2493 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize); 2494 if (UNLIKELY(method == nullptr)) { 2495 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( 2496 interface_method, this_object, caller_method); 2497 return GetTwoWordFailureValue(); // Failure. 2498 } 2499 } else { 2500 // The interface method is unresolved, so look it up in the dex file of the caller. 2501 DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod()); 2502 2503 // Fetch the dex_method_idx of the target interface method from the caller. 2504 uint32_t dex_method_idx; 2505 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2506 const DexFile::CodeItem* code_item = caller_method->GetCodeItem(); 2507 DCHECK_LT(dex_pc, code_item->insns_size_in_code_units_); 2508 const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]); 2509 Instruction::Code instr_code = instr->Opcode(); 2510 DCHECK(instr_code == Instruction::INVOKE_INTERFACE || 2511 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 2512 << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr); 2513 if (instr_code == Instruction::INVOKE_INTERFACE) { 2514 dex_method_idx = instr->VRegB_35c(); 2515 } else { 2516 DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 2517 dex_method_idx = instr->VRegB_3rc(); 2518 } 2519 2520 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache() 2521 ->GetDexFile(); 2522 uint32_t shorty_len; 2523 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), 2524 &shorty_len); 2525 { 2526 // Remember the args in case a GC happens in FindMethodFromCode. 2527 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2528 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 2529 visitor.VisitArguments(); 2530 method = FindMethodFromCode<kInterface, false>(dex_method_idx, 2531 &this_object, 2532 caller_method, 2533 self); 2534 visitor.FixupReferences(); 2535 } 2536 2537 if (UNLIKELY(method == nullptr)) { 2538 CHECK(self->IsExceptionPending()); 2539 return GetTwoWordFailureValue(); // Failure. 2540 } 2541 interface_method = 2542 caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize); 2543 DCHECK(!interface_method->IsRuntimeMethod()); 2544 } 2545 2546 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2547 // We create a new table with the new pair { interface_method, method }. 2548 uint32_t imt_index = ImTable::GetImtIndex(interface_method); 2549 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize); 2550 if (conflict_method->IsRuntimeMethod()) { 2551 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2552 cls.Get(), 2553 conflict_method, 2554 interface_method, 2555 method, 2556 /*force_new_conflict_method*/false); 2557 if (new_conflict_method != conflict_method) { 2558 // Update the IMT if we create a new conflict method. No fence needed here, as the 2559 // data is consistent. 2560 imt->Set(imt_index, 2561 new_conflict_method, 2562 kRuntimePointerSize); 2563 } 2564 } 2565 2566 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2567 2568 // When we return, the caller will branch to this address, so it had better not be 0! 2569 DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod() 2570 << " location: " << method->GetDexFile()->GetLocation(); 2571 2572 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2573 reinterpret_cast<uintptr_t>(method)); 2574} 2575 2576// Returns shorty type so the caller can determine how to put |result| 2577// into expected registers. The shorty type is static so the compiler 2578// could call different flavors of this code path depending on the 2579// shorty type though this would require different entry points for 2580// each type. 2581extern "C" uintptr_t artInvokePolymorphic( 2582 JValue* result, 2583 mirror::Object* raw_method_handle, 2584 Thread* self, 2585 ArtMethod** sp) 2586 REQUIRES_SHARED(Locks::mutator_lock_) { 2587 ScopedQuickEntrypointChecks sqec(self); 2588 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs)); 2589 2590 // Start new JNI local reference state 2591 JNIEnvExt* env = self->GetJniEnv(); 2592 ScopedObjectAccessUnchecked soa(env); 2593 ScopedJniEnvLocalRefState env_state(env); 2594 const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe."); 2595 2596 // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC. 2597 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2598 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2599 const DexFile::CodeItem* code = caller_method->GetCodeItem(); 2600 const Instruction* inst = Instruction::At(&code->insns_[dex_pc]); 2601 DCHECK(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC || 2602 inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2603 const DexFile* dex_file = caller_method->GetDexFile(); 2604 const uint32_t proto_idx = inst->VRegH(); 2605 const char* shorty = dex_file->GetShorty(proto_idx); 2606 const size_t shorty_length = strlen(shorty); 2607 static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static. 2608 RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa); 2609 gc_visitor.VisitArguments(); 2610 2611 // Wrap raw_method_handle in a Handle for safety. 2612 StackHandleScope<5> hs(self); 2613 Handle<mirror::MethodHandle> method_handle( 2614 hs.NewHandle(ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(raw_method_handle)))); 2615 raw_method_handle = nullptr; 2616 self->EndAssertNoThreadSuspension(old_cause); 2617 2618 // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact(). 2619 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 2620 ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::kForceICCECheck>(self, 2621 inst->VRegB(), 2622 caller_method, 2623 kVirtual); 2624 DCHECK((resolved_method == 2625 jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) || 2626 (resolved_method == 2627 jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke))); 2628 if (UNLIKELY(method_handle.IsNull())) { 2629 ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual); 2630 return static_cast<uintptr_t>('V'); 2631 } 2632 2633 Handle<mirror::Class> caller_class(hs.NewHandle(caller_method->GetDeclaringClass())); 2634 Handle<mirror::MethodType> method_type(hs.NewHandle(linker->ResolveMethodType( 2635 *dex_file, proto_idx, 2636 hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()), 2637 hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader())))); 2638 // This implies we couldn't resolve one or more types in this method handle. 2639 if (UNLIKELY(method_type.IsNull())) { 2640 CHECK(self->IsExceptionPending()); 2641 return static_cast<uintptr_t>('V'); 2642 } 2643 2644 DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst->VRegA()); 2645 DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic); 2646 2647 // Fix references before constructing the shadow frame. 2648 gc_visitor.FixupReferences(); 2649 2650 // Construct shadow frame placing arguments consecutively from |first_arg|. 2651 const bool is_range = (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE); 2652 const size_t num_vregs = is_range ? inst->VRegA_4rcc() : inst->VRegA_45cc(); 2653 const size_t first_arg = 0; 2654 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 2655 CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc); 2656 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 2657 ScopedStackedShadowFramePusher 2658 frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); 2659 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, 2660 kMethodIsStatic, 2661 shorty, 2662 strlen(shorty), 2663 shadow_frame, 2664 first_arg); 2665 shadow_frame_builder.VisitArguments(); 2666 2667 // Push a transition back into managed code onto the linked list in thread. 2668 ManagedStack fragment; 2669 self->PushManagedStackFragment(&fragment); 2670 2671 // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in 2672 // consecutive order. 2673 uint32_t unused_args[Instruction::kMaxVarArgRegs] = {}; 2674 uint32_t first_callee_arg = first_arg + 1; 2675 if (!DoInvokePolymorphic<true /* is_range */>(self, 2676 resolved_method, 2677 *shadow_frame, 2678 method_handle, 2679 method_type, 2680 unused_args, 2681 first_callee_arg, 2682 result)) { 2683 DCHECK(self->IsExceptionPending()); 2684 } 2685 2686 // Pop transition record. 2687 self->PopManagedStackFragment(fragment); 2688 2689 return static_cast<uintptr_t>(shorty[0]); 2690} 2691 2692} // namespace art 2693