quick_trampoline_entrypoints.cc revision badee9820fcf5dca5f8c46c3215ae1779ee7736e
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "art_method-inl.h" 18#include "callee_save_frame.h" 19#include "common_throws.h" 20#include "dex_file-inl.h" 21#include "dex_instruction-inl.h" 22#include "entrypoints/entrypoint_utils-inl.h" 23#include "entrypoints/runtime_asm_entrypoints.h" 24#include "gc/accounting/card_table-inl.h" 25#include "interpreter/interpreter.h" 26#include "linear_alloc.h" 27#include "method_reference.h" 28#include "mirror/class-inl.h" 29#include "mirror/dex_cache-inl.h" 30#include "mirror/method.h" 31#include "mirror/object-inl.h" 32#include "mirror/object_array-inl.h" 33#include "oat_quick_method_header.h" 34#include "quick_exception_handler.h" 35#include "runtime.h" 36#include "scoped_thread_state_change.h" 37#include "stack.h" 38#include "debugger.h" 39 40namespace art { 41 42// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. 43class QuickArgumentVisitor { 44 // Number of bytes for each out register in the caller method's frame. 45 static constexpr size_t kBytesStackArgLocation = 4; 46 // Frame size in bytes of a callee-save frame for RefsAndArgs. 47 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize = 48 GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs); 49#if defined(__arm__) 50 // The callee save frame is pointed to by SP. 51 // | argN | | 52 // | ... | | 53 // | arg4 | | 54 // | arg3 spill | | Caller's frame 55 // | arg2 spill | | 56 // | arg1 spill | | 57 // | Method* | --- 58 // | LR | 59 // | ... | 4x6 bytes callee saves 60 // | R3 | 61 // | R2 | 62 // | R1 | 63 // | S15 | 64 // | : | 65 // | S0 | 66 // | | 4x2 bytes padding 67 // | Method* | <- sp 68 static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat; 69 static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat; 70 static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat; 71 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat; 72 static constexpr bool kQuickSkipOddFpRegisters = false; 73 static constexpr size_t kNumQuickGprArgs = 3; 74 static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16; 75 static constexpr bool kGprFprLockstep = false; 76 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 77 arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg. 78 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 79 arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg. 80 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 81 arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address. 82 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 83 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 84 } 85#elif defined(__aarch64__) 86 // The callee save frame is pointed to by SP. 
87 // | argN | | 88 // | ... | | 89 // | arg4 | | 90 // | arg3 spill | | Caller's frame 91 // | arg2 spill | | 92 // | arg1 spill | | 93 // | Method* | --- 94 // | LR | 95 // | X29 | 96 // | : | 97 // | X20 | 98 // | X7 | 99 // | : | 100 // | X1 | 101 // | D7 | 102 // | : | 103 // | D0 | 104 // | | padding 105 // | Method* | <- sp 106 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 107 static constexpr bool kAlignPairRegister = false; 108 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 109 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 110 static constexpr bool kQuickSkipOddFpRegisters = false; 111 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 112 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 113 static constexpr bool kGprFprLockstep = false; 114 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 115 arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg. 116 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 117 arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg. 118 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 119 arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address. 120 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 121 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 122 } 123#elif defined(__mips__) && !defined(__LP64__) 124 // The callee save frame is pointed to by SP. 125 // | argN | | 126 // | ... | | 127 // | arg4 | | 128 // | arg3 spill | | Caller's frame 129 // | arg2 spill | | 130 // | arg1 spill | | 131 // | Method* | --- 132 // | RA | 133 // | ... | callee saves 134 // | A3 | arg3 135 // | A2 | arg2 136 // | A1 | arg1 137 // | F15 | 138 // | F14 | f_arg1 139 // | F13 | 140 // | F12 | f_arg0 141 // | | padding 142 // | A0/Method* | <- sp 143 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 144 static constexpr bool kAlignPairRegister = true; 145 static constexpr bool kQuickSoftFloatAbi = false; 146 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 147 static constexpr bool kQuickSkipOddFpRegisters = true; 148 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 149 static constexpr size_t kNumQuickFprArgs = 4; // 2 arguments passed in FPRs. Floats can be passed 150 // only in even numbered registers and each double 151 // occupies two registers. 152 static constexpr bool kGprFprLockstep = false; 153 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg. 154 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 32; // Offset of first GPR arg. 155 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 76; // Offset of return address. 156 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 157 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 158 } 159#elif defined(__mips__) && defined(__LP64__) 160 // The callee save frame is pointed to by SP. 161 // | argN | | 162 // | ... | | 163 // | arg4 | | 164 // | arg3 spill | | Caller's frame 165 // | arg2 spill | | 166 // | arg1 spill | | 167 // | Method* | --- 168 // | RA | 169 // | ... 
| callee saves 170 // | A7 | arg7 171 // | A6 | arg6 172 // | A5 | arg5 173 // | A4 | arg4 174 // | A3 | arg3 175 // | A2 | arg2 176 // | A1 | arg1 177 // | F19 | f_arg7 178 // | F18 | f_arg6 179 // | F17 | f_arg5 180 // | F16 | f_arg4 181 // | F15 | f_arg3 182 // | F14 | f_arg2 183 // | F13 | f_arg1 184 // | F12 | f_arg0 185 // | | padding 186 // | A0/Method* | <- sp 187 // NOTE: for Mip64, when A0 is skipped, F0 is also skipped. 188 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 189 static constexpr bool kAlignPairRegister = false; 190 static constexpr bool kQuickSoftFloatAbi = false; 191 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 192 static constexpr bool kQuickSkipOddFpRegisters = false; 193 static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs. 194 static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs. 195 static constexpr bool kGprFprLockstep = true; 196 197 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F1). 198 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1). 199 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address. 200 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 201 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 202 } 203#elif defined(__i386__) 204 // The callee save frame is pointed to by SP. 205 // | argN | | 206 // | ... | | 207 // | arg4 | | 208 // | arg3 spill | | Caller's frame 209 // | arg2 spill | | 210 // | arg1 spill | | 211 // | Method* | --- 212 // | Return | 213 // | EBP,ESI,EDI | callee saves 214 // | EBX | arg3 215 // | EDX | arg2 216 // | ECX | arg1 217 // | XMM3 | float arg 4 218 // | XMM2 | float arg 3 219 // | XMM1 | float arg 2 220 // | XMM0 | float arg 1 221 // | EAX/Method* | <- sp 222 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 223 static constexpr bool kAlignPairRegister = false; 224 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 225 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 226 static constexpr bool kQuickSkipOddFpRegisters = false; 227 static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs. 228 static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs. 229 static constexpr bool kGprFprLockstep = false; 230 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg. 231 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg. 232 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address. 233 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 234 return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA); 235 } 236#elif defined(__x86_64__) 237 // The callee save frame is pointed to by SP. 238 // | argN | | 239 // | ... | | 240 // | reg. 
arg spills | | Caller's frame 241 // | Method* | --- 242 // | Return | 243 // | R15 | callee save 244 // | R14 | callee save 245 // | R13 | callee save 246 // | R12 | callee save 247 // | R9 | arg5 248 // | R8 | arg4 249 // | RSI/R6 | arg1 250 // | RBP/R5 | callee save 251 // | RBX/R3 | callee save 252 // | RDX/R2 | arg2 253 // | RCX/R1 | arg3 254 // | XMM7 | float arg 8 255 // | XMM6 | float arg 7 256 // | XMM5 | float arg 6 257 // | XMM4 | float arg 5 258 // | XMM3 | float arg 4 259 // | XMM2 | float arg 3 260 // | XMM1 | float arg 2 261 // | XMM0 | float arg 1 262 // | Padding | 263 // | RDI/Method* | <- sp 264 static constexpr bool kSplitPairAcrossRegisterAndStack = false; 265 static constexpr bool kAlignPairRegister = false; 266 static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI. 267 static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false; 268 static constexpr bool kQuickSkipOddFpRegisters = false; 269 static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs. 270 static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs. 271 static constexpr bool kGprFprLockstep = false; 272 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg. 273 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg. 274 static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address. 275 static size_t GprIndexToGprOffset(uint32_t gpr_index) { 276 switch (gpr_index) { 277 case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA)); 278 case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA)); 279 case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA)); 280 case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA)); 281 case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA)); 282 default: 283 LOG(FATAL) << "Unexpected GPR index: " << gpr_index; 284 return 0; 285 } 286 } 287#else 288#error "Unsupported architecture" 289#endif 290 291 public: 292 // Special handling for proxy methods. Proxy methods are instance methods so the 293 // 'this' object is the 1st argument. They also have the same frame layout as the 294 // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the 295 // 1st GPR. 296 static mirror::Object* GetProxyThisObject(ArtMethod** sp) 297 SHARED_REQUIRES(Locks::mutator_lock_) { 298 CHECK((*sp)->IsProxyMethod()); 299 CHECK_GT(kNumQuickGprArgs, 0u); 300 constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 
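  // Illustrative arithmetic only, derived from the constants defined above (not an additional
  // requirement): on x86_64, kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset is 80 + 4*8 = 112 and
  // GprIndexToGprOffset(0) is 4 * 8 = 32, so 'this' is read from sp + 144, i.e. the spill slot
  // of RSI/R6, the first managed GPR argument register.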
301 size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + 302 GprIndexToGprOffset(kThisGprIndex); 303 uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset; 304 return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); 305 } 306 307 static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { 308 DCHECK((*sp)->IsCalleeSaveMethod()); 309 return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs); 310 } 311 312 static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { 313 DCHECK((*sp)->IsCalleeSaveMethod()); 314 uint8_t* previous_sp = 315 reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; 316 return *reinterpret_cast<ArtMethod**>(previous_sp); 317 } 318 319 static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { 320 DCHECK((*sp)->IsCalleeSaveMethod()); 321 const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs); 322 ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( 323 reinterpret_cast<uintptr_t>(sp) + callee_frame_size); 324 uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp); 325 const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc); 326 uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); 327 328 if (current_code->IsOptimized()) { 329 CodeInfo code_info = current_code->GetOptimizedCodeInfo(); 330 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 331 StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding); 332 DCHECK(stack_map.IsValid()); 333 if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) { 334 InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); 335 return inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 336 inline_info.GetDepth(encoding.inline_info_encoding)-1); 337 } else { 338 return stack_map.GetDexPc(encoding.stack_map_encoding); 339 } 340 } else { 341 return current_code->ToDexPc(*caller_sp, outer_pc); 342 } 343 } 344 345 // For the given quick ref and args quick frame, return the caller's PC. 346 static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { 347 DCHECK((*sp)->IsCalleeSaveMethod()); 348 uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; 349 return *reinterpret_cast<uintptr_t*>(lr); 350 } 351 352 QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 353 uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) : 354 is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), 355 gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), 356 fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), 357 stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize 358 + sizeof(ArtMethod*)), // Skip ArtMethod*. 359 gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), 360 cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { 361 static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), 362 "Number of Quick FPR arguments unexpected"); 363 static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled), 364 "Double alignment unexpected"); 365 // For register alignment, we want to assume that counters(fpr_double_index_) are even if the 366 // next register is even. 
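  // For example, on ARM hard-float the argument sequence (float, double, float) is assigned
  // S0, D1 (= S2/S3) and the back-filled S1, respectively -- an illustration derived from the
  // back-filling logic in GetParamAddress() and VisitArguments() below.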
367 static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, 368 "Number of Quick FPR arguments not even"); 369 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); 370 } 371 372 virtual ~QuickArgumentVisitor() {} 373 374 virtual void Visit() = 0; 375 376 Primitive::Type GetParamPrimitiveType() const { 377 return cur_type_; 378 } 379 380 uint8_t* GetParamAddress() const { 381 if (!kQuickSoftFloatAbi) { 382 Primitive::Type type = GetParamPrimitiveType(); 383 if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) { 384 if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) { 385 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 386 return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 387 } 388 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 389 return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA)); 390 } 391 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 392 } 393 } 394 if (gpr_index_ < kNumQuickGprArgs) { 395 return gpr_args_ + GprIndexToGprOffset(gpr_index_); 396 } 397 return stack_args_ + (stack_index_ * kBytesStackArgLocation); 398 } 399 400 bool IsSplitLongOrDouble() const { 401 if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || 402 (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { 403 return is_split_long_or_double_; 404 } else { 405 return false; // An optimization for when GPR and FPRs are 64bit. 406 } 407 } 408 409 bool IsParamAReference() const { 410 return GetParamPrimitiveType() == Primitive::kPrimNot; 411 } 412 413 bool IsParamALongOrDouble() const { 414 Primitive::Type type = GetParamPrimitiveType(); 415 return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; 416 } 417 418 uint64_t ReadSplitLongParam() const { 419 // The splitted long is always available through the stack. 420 return *reinterpret_cast<uint64_t*>(stack_args_ 421 + stack_index_ * kBytesStackArgLocation); 422 } 423 424 void IncGprIndex() { 425 gpr_index_++; 426 if (kGprFprLockstep) { 427 fpr_index_++; 428 } 429 } 430 431 void IncFprIndex() { 432 fpr_index_++; 433 if (kGprFprLockstep) { 434 gpr_index_++; 435 } 436 } 437 438 void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) { 439 // (a) 'stack_args_' should point to the first method's argument 440 // (b) whatever the argument type it is, the 'stack_index_' should 441 // be moved forward along with every visiting. 442 gpr_index_ = 0; 443 fpr_index_ = 0; 444 if (kQuickDoubleRegAlignedFloatBackFilled) { 445 fpr_double_index_ = 0; 446 } 447 stack_index_ = 0; 448 if (!is_static_) { // Handle this. 
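      // The receiver of an instance method is visited first as a reference argument: it takes
      // the first stack slot and, when available, the first argument GPR, matching how the
      // quick ABI passes 'this'.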
449 cur_type_ = Primitive::kPrimNot; 450 is_split_long_or_double_ = false; 451 Visit(); 452 stack_index_++; 453 if (kNumQuickGprArgs > 0) { 454 IncGprIndex(); 455 } 456 } 457 for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) { 458 cur_type_ = Primitive::GetType(shorty_[shorty_index]); 459 switch (cur_type_) { 460 case Primitive::kPrimNot: 461 case Primitive::kPrimBoolean: 462 case Primitive::kPrimByte: 463 case Primitive::kPrimChar: 464 case Primitive::kPrimShort: 465 case Primitive::kPrimInt: 466 is_split_long_or_double_ = false; 467 Visit(); 468 stack_index_++; 469 if (gpr_index_ < kNumQuickGprArgs) { 470 IncGprIndex(); 471 } 472 break; 473 case Primitive::kPrimFloat: 474 is_split_long_or_double_ = false; 475 Visit(); 476 stack_index_++; 477 if (kQuickSoftFloatAbi) { 478 if (gpr_index_ < kNumQuickGprArgs) { 479 IncGprIndex(); 480 } 481 } else { 482 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 483 IncFprIndex(); 484 if (kQuickDoubleRegAlignedFloatBackFilled) { 485 // Double should not overlap with float. 486 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4. 487 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2)); 488 // Float should not overlap with double. 489 if (fpr_index_ % 2 == 0) { 490 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 491 } 492 } else if (kQuickSkipOddFpRegisters) { 493 IncFprIndex(); 494 } 495 } 496 } 497 break; 498 case Primitive::kPrimDouble: 499 case Primitive::kPrimLong: 500 if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) { 501 if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) { 502 // Currently, this is only for ARM and MIPS, where the first available parameter 503 // register is R1 (on ARM) or A1 (on MIPS). So we skip it, and use R2 (on ARM) or 504 // A2 (on MIPS) instead. 505 IncGprIndex(); 506 } 507 is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) && 508 ((gpr_index_ + 1) == kNumQuickGprArgs); 509 if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) { 510 // We don't want to split this. Pass over this register. 511 gpr_index_++; 512 is_split_long_or_double_ = false; 513 } 514 Visit(); 515 if (kBytesStackArgLocation == 4) { 516 stack_index_+= 2; 517 } else { 518 CHECK_EQ(kBytesStackArgLocation, 8U); 519 stack_index_++; 520 } 521 if (gpr_index_ < kNumQuickGprArgs) { 522 IncGprIndex(); 523 if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) { 524 if (gpr_index_ < kNumQuickGprArgs) { 525 IncGprIndex(); 526 } 527 } 528 } 529 } else { 530 is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) && 531 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled; 532 Visit(); 533 if (kBytesStackArgLocation == 4) { 534 stack_index_+= 2; 535 } else { 536 CHECK_EQ(kBytesStackArgLocation, 8U); 537 stack_index_++; 538 } 539 if (kQuickDoubleRegAlignedFloatBackFilled) { 540 if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) { 541 fpr_double_index_ += 2; 542 // Float should not overlap with double. 
543 if (fpr_index_ % 2 == 0) { 544 fpr_index_ = std::max(fpr_double_index_, fpr_index_); 545 } 546 } 547 } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 548 IncFprIndex(); 549 if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) { 550 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) { 551 IncFprIndex(); 552 } 553 } 554 } 555 } 556 break; 557 default: 558 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_; 559 } 560 } 561 } 562 563 protected: 564 const bool is_static_; 565 const char* const shorty_; 566 const uint32_t shorty_len_; 567 568 private: 569 uint8_t* const gpr_args_; // Address of GPR arguments in callee save frame. 570 uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame. 571 uint8_t* const stack_args_; // Address of stack arguments in caller's frame. 572 uint32_t gpr_index_; // Index into spilled GPRs. 573 // Index into spilled FPRs. 574 // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_ 575 // holds a higher register number. 576 uint32_t fpr_index_; 577 // Index into spilled FPRs for aligned double. 578 // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in 579 // terms of singles, may be behind fpr_index. 580 uint32_t fpr_double_index_; 581 uint32_t stack_index_; // Index into arguments on the stack. 582 // The current type of argument during VisitArguments. 583 Primitive::Type cur_type_; 584 // Does a 64bit parameter straddle the register and stack arguments? 585 bool is_split_long_or_double_; 586}; 587 588// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It 589// allows to use the QuickArgumentVisitor constants without moving all the code in its own module. 590extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) 591 SHARED_REQUIRES(Locks::mutator_lock_) { 592 return QuickArgumentVisitor::GetProxyThisObject(sp); 593} 594 595// Visits arguments on the stack placing them into the shadow frame. 596class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { 597 public: 598 BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, 599 uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : 600 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} 601 602 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; 603 604 private: 605 ShadowFrame* const sf_; 606 uint32_t cur_reg_; 607 608 DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor); 609}; 610 611void BuildQuickShadowFrameVisitor::Visit() { 612 Primitive::Type type = GetParamPrimitiveType(); 613 switch (type) { 614 case Primitive::kPrimLong: // Fall-through. 615 case Primitive::kPrimDouble: 616 if (IsSplitLongOrDouble()) { 617 sf_->SetVRegLong(cur_reg_, ReadSplitLongParam()); 618 } else { 619 sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress())); 620 } 621 ++cur_reg_; 622 break; 623 case Primitive::kPrimNot: { 624 StackReference<mirror::Object>* stack_ref = 625 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 626 sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr()); 627 } 628 break; 629 case Primitive::kPrimBoolean: // Fall-through. 630 case Primitive::kPrimByte: // Fall-through. 631 case Primitive::kPrimChar: // Fall-through. 632 case Primitive::kPrimShort: // Fall-through. 633 case Primitive::kPrimInt: // Fall-through. 
634 case Primitive::kPrimFloat: 635 sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress())); 636 break; 637 case Primitive::kPrimVoid: 638 LOG(FATAL) << "UNREACHABLE"; 639 UNREACHABLE(); 640 } 641 ++cur_reg_; 642} 643 644extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) 645 SHARED_REQUIRES(Locks::mutator_lock_) { 646 // Ensure we don't get thread suspension until the object arguments are safely in the shadow 647 // frame. 648 ScopedQuickEntrypointChecks sqec(self); 649 650 if (UNLIKELY(!method->IsInvokable())) { 651 method->ThrowInvocationTimeError(); 652 return 0; 653 } 654 655 JValue tmp_value; 656 ShadowFrame* deopt_frame = self->PopStackedShadowFrame( 657 StackedShadowFrameType::kDeoptimizationShadowFrame, false); 658 ManagedStack fragment; 659 660 DCHECK(!method->IsNative()) << PrettyMethod(method); 661 uint32_t shorty_len = 0; 662 ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*)); 663 const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem(); 664 DCHECK(code_item != nullptr) << PrettyMethod(method); 665 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 666 667 JValue result; 668 669 if (deopt_frame != nullptr) { 670 // Coming from partial-fragment deopt. 671 672 if (kIsDebugBuild) { 673 // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom 674 // of the call-stack) corresponds to the called method. 675 ShadowFrame* linked = deopt_frame; 676 while (linked->GetLink() != nullptr) { 677 linked = linked->GetLink(); 678 } 679 CHECK_EQ(method, linked->GetMethod()) << PrettyMethod(method) << " " 680 << PrettyMethod(linked->GetMethod()); 681 } 682 683 if (VLOG_IS_ON(deopt)) { 684 // Print out the stack to verify that it was a partial-fragment deopt. 685 LOG(INFO) << "Continue-ing from deopt. Stack is:"; 686 QuickExceptionHandler::DumpFramesWithType(self, true); 687 } 688 689 mirror::Throwable* pending_exception = nullptr; 690 bool from_code = false; 691 self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code); 692 693 // Push a transition back into managed code onto the linked list in thread. 694 self->PushManagedStackFragment(&fragment); 695 696 // Ensure that the stack is still in order. 697 if (kIsDebugBuild) { 698 class DummyStackVisitor : public StackVisitor { 699 public: 700 explicit DummyStackVisitor(Thread* self_in) SHARED_REQUIRES(Locks::mutator_lock_) 701 : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} 702 703 bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { 704 // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking 705 // logic. Just always say we want to continue. 706 return true; 707 } 708 }; 709 DummyStackVisitor dsv(self); 710 dsv.WalkStack(); 711 } 712 713 // Restore the exception that was pending before deoptimization then interpret the 714 // deoptimized frames. 715 if (pending_exception != nullptr) { 716 self->SetException(pending_exception); 717 } 718 interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result); 719 } else { 720 const char* old_cause = self->StartAssertNoThreadSuspension( 721 "Building interpreter shadow frame"); 722 uint16_t num_regs = code_item->registers_size_; 723 // No last shadow coming from quick. 
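    // The shadow frame gets code_item->registers_size_ vregs; the incoming arguments are copied
    // into the last ins_size_ of them (starting at first_arg_reg below), per the Dex calling
    // convention.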
724 ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = 725 CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0); 726 ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); 727 size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; 728 BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, 729 shadow_frame, first_arg_reg); 730 shadow_frame_builder.VisitArguments(); 731 const bool needs_initialization = 732 method->IsStatic() && !method->GetDeclaringClass()->IsInitialized(); 733 // Push a transition back into managed code onto the linked list in thread. 734 self->PushManagedStackFragment(&fragment); 735 self->PushShadowFrame(shadow_frame); 736 self->EndAssertNoThreadSuspension(old_cause); 737 738 if (needs_initialization) { 739 // Ensure static method's class is initialized. 740 StackHandleScope<1> hs(self); 741 Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass())); 742 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { 743 DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod()); 744 self->PopManagedStackFragment(fragment); 745 return 0; 746 } 747 } 748 749 result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame); 750 } 751 752 // Pop transition. 753 self->PopManagedStackFragment(fragment); 754 755 // Request a stack deoptimization if needed 756 ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); 757 uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp); 758 // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization 759 // should be done and it knows the real return pc. 760 if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) && 761 Dbg::IsForcedInterpreterNeededForUpcall(self, caller) && 762 Runtime::Current()->IsDeoptimizeable(caller_pc))) { 763 // Push the context of the deoptimization stack so we can restore the return value and the 764 // exception before executing the deoptimized frames. 765 self->PushDeoptimizationContext( 766 result, shorty[0] == 'L', /* from_code */ false, self->GetException()); 767 768 // Set special exception to cause deoptimization. 769 self->SetException(Thread::GetDeoptimizationException()); 770 } 771 772 // No need to restore the args since the method has already been run by the interpreter. 773 return result.GetJ(); 774} 775 776// Visits arguments on the stack placing them into the args vector, Object* arguments are converted 777// to jobjects. 778class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { 779 public: 780 BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, 781 ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : 782 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} 783 784 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; 785 786 void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_); 787 788 private: 789 ScopedObjectAccessUnchecked* const soa_; 790 std::vector<jvalue>* const args_; 791 // References which we must update when exiting in case the GC moved the objects. 
792 std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_; 793 794 DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); 795}; 796 797void BuildQuickArgumentVisitor::Visit() { 798 jvalue val; 799 Primitive::Type type = GetParamPrimitiveType(); 800 switch (type) { 801 case Primitive::kPrimNot: { 802 StackReference<mirror::Object>* stack_ref = 803 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 804 val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 805 references_.push_back(std::make_pair(val.l, stack_ref)); 806 break; 807 } 808 case Primitive::kPrimLong: // Fall-through. 809 case Primitive::kPrimDouble: 810 if (IsSplitLongOrDouble()) { 811 val.j = ReadSplitLongParam(); 812 } else { 813 val.j = *reinterpret_cast<jlong*>(GetParamAddress()); 814 } 815 break; 816 case Primitive::kPrimBoolean: // Fall-through. 817 case Primitive::kPrimByte: // Fall-through. 818 case Primitive::kPrimChar: // Fall-through. 819 case Primitive::kPrimShort: // Fall-through. 820 case Primitive::kPrimInt: // Fall-through. 821 case Primitive::kPrimFloat: 822 val.i = *reinterpret_cast<jint*>(GetParamAddress()); 823 break; 824 case Primitive::kPrimVoid: 825 LOG(FATAL) << "UNREACHABLE"; 826 UNREACHABLE(); 827 } 828 args_->push_back(val); 829} 830 831void BuildQuickArgumentVisitor::FixupReferences() { 832 // Fixup any references which may have changed. 833 for (const auto& pair : references_) { 834 pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first)); 835 soa_->Env()->DeleteLocalRef(pair.first); 836 } 837} 838 839// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method 840// which is responsible for recording callee save registers. We explicitly place into jobjects the 841// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a 842// field within the proxy object, which will box the primitive arguments and deal with error cases. 843extern "C" uint64_t artQuickProxyInvokeHandler( 844 ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) 845 SHARED_REQUIRES(Locks::mutator_lock_) { 846 DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method); 847 DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); 848 // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 849 const char* old_cause = 850 self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); 851 // Register the top of the managed stack, making stack crawlable. 852 DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method); 853 self->VerifyStack(); 854 // Start new JNI local reference state. 855 JNIEnvExt* env = self->GetJniEnv(); 856 ScopedObjectAccessUnchecked soa(env); 857 ScopedJniEnvLocalRefState env_state(env); 858 // Create local ref. copies of proxy method and the receiver. 859 jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); 860 861 // Placing arguments into args vector and remove the receiver. 
ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*)); 863 CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " " 864 << PrettyMethod(non_proxy_method); 865 std::vector<jvalue> args; 866 uint32_t shorty_len = 0; 867 const char* shorty = non_proxy_method->GetShorty(&shorty_len); 868 BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); 869 870 local_ref_visitor.VisitArguments(); 871 DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method); 872 args.erase(args.begin()); 873 874 // Convert proxy method into expected interface method. 875 ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*)); 876 DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method); 877 DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); 878 self->EndAssertNoThreadSuspension(old_cause); 879 jobject interface_method_jobj = soa.AddLocalReference<jobject>( 880 mirror::Method::CreateFromArtMethod(soa.Self(), interface_method)); 881 882 // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code 883 // that performs allocations. 884 JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); 885 // Restore references which might have moved. 886 local_ref_visitor.FixupReferences(); 887 return result.GetJ(); 888} 889 890// Read object references held in arguments from quick frames and place them in JNI local references, 891// so they don't get garbage collected. 892class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { 893 public: 894 RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, 895 uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : 896 QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} 897 898 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; 899 900 void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_); 901 902 private: 903 ScopedObjectAccessUnchecked* const soa_; 904 // References which we must update when exiting in case the GC moved the objects. 905 std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_; 906 907 DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor); 908}; 909 910void RememberForGcArgumentVisitor::Visit() { 911 if (IsParamAReference()) { 912 StackReference<mirror::Object>* stack_ref = 913 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 914 jobject reference = 915 soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr()); 916 references_.push_back(std::make_pair(reference, stack_ref)); 917 } 918} 919 920void RememberForGcArgumentVisitor::FixupReferences() { 921 // Fixup any references which may have changed. 922 for (const auto& pair : references_) { 923 pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first)); 924 soa_->Env()->DeleteLocalRef(pair.first); 925 } 926} 927 928// Lazily resolve a method for quick. Called by stub code. 929extern "C" const void* artQuickResolutionTrampoline( 930 ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) 931 SHARED_REQUIRES(Locks::mutator_lock_) { 932 // The resolution trampoline stashes the resolved method into the callee-save frame to transport 933 // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely 934 // does not have the same stack layout as the callee-save method).
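  // (The stashing happens at the very end of this function: '*sp = called' overwrites the runtime
  // method in the frame, and the resolved entry point is the code pointer returned to the stub.)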
935 ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); 936 // Start new JNI local reference state 937 JNIEnvExt* env = self->GetJniEnv(); 938 ScopedObjectAccessUnchecked soa(env); 939 ScopedJniEnvLocalRefState env_state(env); 940 const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up"); 941 942 // Compute details about the called method (avoid GCs) 943 ClassLinker* linker = Runtime::Current()->GetClassLinker(); 944 InvokeType invoke_type; 945 MethodReference called_method(nullptr, 0); 946 const bool called_method_known_on_entry = !called->IsRuntimeMethod(); 947 ArtMethod* caller = nullptr; 948 if (!called_method_known_on_entry) { 949 caller = QuickArgumentVisitor::GetCallingMethod(sp); 950 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 951 const DexFile::CodeItem* code; 952 called_method.dex_file = caller->GetDexFile(); 953 code = caller->GetCodeItem(); 954 CHECK_LT(dex_pc, code->insns_size_in_code_units_); 955 const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); 956 Instruction::Code instr_code = instr->Opcode(); 957 bool is_range; 958 switch (instr_code) { 959 case Instruction::INVOKE_DIRECT: 960 invoke_type = kDirect; 961 is_range = false; 962 break; 963 case Instruction::INVOKE_DIRECT_RANGE: 964 invoke_type = kDirect; 965 is_range = true; 966 break; 967 case Instruction::INVOKE_STATIC: 968 invoke_type = kStatic; 969 is_range = false; 970 break; 971 case Instruction::INVOKE_STATIC_RANGE: 972 invoke_type = kStatic; 973 is_range = true; 974 break; 975 case Instruction::INVOKE_SUPER: 976 invoke_type = kSuper; 977 is_range = false; 978 break; 979 case Instruction::INVOKE_SUPER_RANGE: 980 invoke_type = kSuper; 981 is_range = true; 982 break; 983 case Instruction::INVOKE_VIRTUAL: 984 invoke_type = kVirtual; 985 is_range = false; 986 break; 987 case Instruction::INVOKE_VIRTUAL_RANGE: 988 invoke_type = kVirtual; 989 is_range = true; 990 break; 991 case Instruction::INVOKE_INTERFACE: 992 invoke_type = kInterface; 993 is_range = false; 994 break; 995 case Instruction::INVOKE_INTERFACE_RANGE: 996 invoke_type = kInterface; 997 is_range = true; 998 break; 999 default: 1000 LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr); 1001 UNREACHABLE(); 1002 } 1003 called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); 1004 } else { 1005 invoke_type = kStatic; 1006 called_method.dex_file = called->GetDexFile(); 1007 called_method.dex_method_index = called->GetDexMethodIndex(); 1008 } 1009 uint32_t shorty_len; 1010 const char* shorty = 1011 called_method.dex_file->GetMethodShorty( 1012 called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len); 1013 RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); 1014 visitor.VisitArguments(); 1015 self->EndAssertNoThreadSuspension(old_cause); 1016 const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface; 1017 // Resolve method filling in dex cache. 1018 if (!called_method_known_on_entry) { 1019 StackHandleScope<1> hs(self); 1020 mirror::Object* dummy = nullptr; 1021 HandleWrapper<mirror::Object> h_receiver( 1022 hs.NewHandleWrapper(virtual_or_interface ? 
&receiver : &dummy)); 1023 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1024 called = linker->ResolveMethod<ClassLinker::kForceICCECheck>( 1025 self, called_method.dex_method_index, caller, invoke_type); 1026 } 1027 const void* code = nullptr; 1028 if (LIKELY(!self->IsExceptionPending())) { 1029 // Incompatible class change should have been handled in resolve method. 1030 CHECK(!called->CheckIncompatibleClassChange(invoke_type)) 1031 << PrettyMethod(called) << " " << invoke_type; 1032 if (virtual_or_interface || invoke_type == kSuper) { 1033 // Refine called method based on receiver for kVirtual/kInterface, and 1034 // caller for kSuper. 1035 ArtMethod* orig_called = called; 1036 if (invoke_type == kVirtual) { 1037 CHECK(receiver != nullptr) << invoke_type; 1038 called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*)); 1039 } else if (invoke_type == kInterface) { 1040 CHECK(receiver != nullptr) << invoke_type; 1041 called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*)); 1042 } else { 1043 DCHECK_EQ(invoke_type, kSuper); 1044 CHECK(caller != nullptr) << invoke_type; 1045 StackHandleScope<2> hs(self); 1046 Handle<mirror::DexCache> dex_cache( 1047 hs.NewHandle(caller->GetDeclaringClass()->GetDexCache())); 1048 Handle<mirror::ClassLoader> class_loader( 1049 hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader())); 1050 // TODO Maybe put this into a mirror::Class function. 1051 mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod( 1052 called_method.dex_method_index, dex_cache, class_loader); 1053 if (ref_class->IsInterface()) { 1054 called = ref_class->FindVirtualMethodForInterfaceSuper(called, sizeof(void*)); 1055 } else { 1056 called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( 1057 called->GetMethodIndex(), sizeof(void*)); 1058 } 1059 } 1060 1061 CHECK(called != nullptr) << PrettyMethod(orig_called) << " " 1062 << PrettyTypeOf(receiver) << " " 1063 << invoke_type << " " << orig_called->GetVtableIndex(); 1064 1065 // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index 1066 // of the sharpened method avoiding dirtying the dex cache if possible. 1067 // Note, called_method.dex_method_index references the dex method before the 1068 // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares 1069 // about the name and signature. 1070 uint32_t update_dex_cache_method_index = called->GetDexMethodIndex(); 1071 if (!called->HasSameDexCacheResolvedMethods(caller, sizeof(void*))) { 1072 // Calling from one dex file to another, need to compute the method index appropriate to 1073 // the caller's dex file. Since we get here only if the original called was a runtime 1074 // method, we've got the correct dex_file and a dex_method_idx from above. 
1075 DCHECK(!called_method_known_on_entry); 1076 DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); 1077 const DexFile* caller_dex_file = called_method.dex_file; 1078 uint32_t caller_method_name_and_sig_index = called_method.dex_method_index; 1079 update_dex_cache_method_index = 1080 called->FindDexMethodIndexInOtherDexFile(*caller_dex_file, 1081 caller_method_name_and_sig_index); 1082 } 1083 if ((update_dex_cache_method_index != DexFile::kDexNoIndex) && 1084 (caller->GetDexCacheResolvedMethod( 1085 update_dex_cache_method_index, sizeof(void*)) != called)) { 1086 caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*)); 1087 } 1088 } else if (invoke_type == kStatic) { 1089 const auto called_dex_method_idx = called->GetDexMethodIndex(); 1090 // For static invokes, we may dispatch to the static method in the superclass but resolve 1091 // using the subclass. To prevent getting slow paths on each invoke, we force set the 1092 // resolved method for the super class dex method index if we are in the same dex file. 1093 // b/19175856 1094 if (called->GetDexFile() == called_method.dex_file && 1095 called_method.dex_method_index != called_dex_method_idx) { 1096 called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*)); 1097 } 1098 } 1099 1100 // Ensure that the called method's class is initialized. 1101 StackHandleScope<1> hs(soa.Self()); 1102 Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass())); 1103 linker->EnsureInitialized(soa.Self(), called_class, true, true); 1104 if (LIKELY(called_class->IsInitialized())) { 1105 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1106 // If we are single-stepping or the called method is deoptimized (by a 1107 // breakpoint, for example), then we have to execute the called method 1108 // with the interpreter. 1109 code = GetQuickToInterpreterBridge(); 1110 } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) { 1111 // If the caller is deoptimized (by a breakpoint, for example), we have to 1112 // continue its execution with interpreter when returning from the called 1113 // method. Because we do not want to execute the called method with the 1114 // interpreter, we wrap its execution into the instrumentation stubs. 1115 // When the called method returns, it will execute the instrumentation 1116 // exit hook that will determine the need of the interpreter with a call 1117 // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if 1118 // it is needed. 1119 code = GetQuickInstrumentationEntryPoint(); 1120 } else { 1121 code = called->GetEntryPointFromQuickCompiledCode(); 1122 } 1123 } else if (called_class->IsInitializing()) { 1124 if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) { 1125 // If we are single-stepping or the called method is deoptimized (by a 1126 // breakpoint, for example), then we have to execute the called method 1127 // with the interpreter. 1128 code = GetQuickToInterpreterBridge(); 1129 } else if (invoke_type == kStatic) { 1130 // Class is still initializing, go to oat and grab code (trampoline must be left in place 1131 // until class is initialized to stop races between threads). 1132 code = linker->GetQuickOatCodeFor(called); 1133 } else { 1134 // No trampoline for non-static methods. 
1135 code = called->GetEntryPointFromQuickCompiledCode(); 1136 } 1137 } else { 1138 DCHECK(called_class->IsErroneous()); 1139 } 1140 } 1141 CHECK_EQ(code == nullptr, self->IsExceptionPending()); 1142 // Fixup any locally saved objects that may have moved during a GC. 1143 visitor.FixupReferences(); 1144 // Place called method in callee-save frame to be placed as first argument to quick method. 1145 *sp = called; 1146 1147 return code; 1148} 1149 1150/* 1151 * This class uses a couple of observations to unite the different calling conventions through 1152 * a few constants. 1153 * 1154 * 1) Number of registers used for passing is normally even, so counting down has no penalty for 1155 * possible alignment. 1156 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point 1157 * types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote 1158 * when we have to split things. 1159 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats 1160 * and we can use Int handling directly. 1161 * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code 1162 * necessary when widening. Also, widening of Ints will take place implicitly, and the 1163 * extension should be compatible with Aarch64, which mandates copying the available bits 1164 * into LSB and leaving the rest unspecified. 1165 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on 1166 * the stack. 1167 * 6) There is only little endian. 1168 * 1169 * 1170 * Actual work is supposed to be done in a delegate of the template type. The interface is as 1171 * follows: 1172 * 1173 * void PushGpr(uintptr_t): Add a value for the next GPR 1174 * 1175 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need 1176 * padding, that is, think the architecture is 32b and aligns 64b. 1177 * 1178 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to 1179 * split this if necessary. The current state will have aligned, if 1180 * necessary. 1181 * 1182 * void PushStack(uintptr_t): Push a value to the stack. 1183 * 1184 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be called with nullptr, 1185 * as this might be important for null initialization. 1186 * Must return the jobject, that is, the reference to the 1187 * entry in the HandleScope (nullptr if necessary). 1188 * 1189 */ 1190template<class T> class BuildNativeCallFrameStateMachine { 1191 public: 1192#if defined(__arm__) 1193 // TODO: These are all dummy values! 1194 static constexpr bool kNativeSoftFloatAbi = true; 1195 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs, r0-r3 1196 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1197 1198 static constexpr size_t kRegistersNeededForLong = 2; 1199 static constexpr size_t kRegistersNeededForDouble = 2; 1200 static constexpr bool kMultiRegistersAligned = true; 1201 static constexpr bool kMultiFPRegistersWidened = false; 1202 static constexpr bool kMultiGPRegistersWidened = false; 1203 static constexpr bool kAlignLongOnStack = true; 1204 static constexpr bool kAlignDoubleOnStack = true; 1205#elif defined(__aarch64__) 1206 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 1207 static constexpr size_t kNumNativeGprArgs = 8; // 8 arguments passed in GPRs.
1208 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs. 1209 1210 static constexpr size_t kRegistersNeededForLong = 1; 1211 static constexpr size_t kRegistersNeededForDouble = 1; 1212 static constexpr bool kMultiRegistersAligned = false; 1213 static constexpr bool kMultiFPRegistersWidened = false; 1214 static constexpr bool kMultiGPRegistersWidened = false; 1215 static constexpr bool kAlignLongOnStack = false; 1216 static constexpr bool kAlignDoubleOnStack = false; 1217#elif defined(__mips__) && !defined(__LP64__) 1218 static constexpr bool kNativeSoftFloatAbi = true; // FP args are passed in GPRs (soft float convention). 1219 static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs. 1220 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1221 1222 static constexpr size_t kRegistersNeededForLong = 2; 1223 static constexpr size_t kRegistersNeededForDouble = 2; 1224 static constexpr bool kMultiRegistersAligned = true; 1225 static constexpr bool kMultiFPRegistersWidened = true; 1226 static constexpr bool kMultiGPRegistersWidened = false; 1227 static constexpr bool kAlignLongOnStack = true; 1228 static constexpr bool kAlignDoubleOnStack = true; 1229#elif defined(__mips__) && defined(__LP64__) 1230 // Let the code prepare GPRs only and we will load the FPRs with same data. 1231 static constexpr bool kNativeSoftFloatAbi = true; 1232 static constexpr size_t kNumNativeGprArgs = 8; 1233 static constexpr size_t kNumNativeFprArgs = 0; 1234 1235 static constexpr size_t kRegistersNeededForLong = 1; 1236 static constexpr size_t kRegistersNeededForDouble = 1; 1237 static constexpr bool kMultiRegistersAligned = false; 1238 static constexpr bool kMultiFPRegistersWidened = false; 1239 static constexpr bool kMultiGPRegistersWidened = true; 1240 static constexpr bool kAlignLongOnStack = false; 1241 static constexpr bool kAlignDoubleOnStack = false; 1242#elif defined(__i386__) 1243 // TODO: Check these! 1244 static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp 1245 static constexpr size_t kNumNativeGprArgs = 0; // 0 arguments passed in GPRs; all arguments go on the stack. 1246 static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs. 1247 1248 static constexpr size_t kRegistersNeededForLong = 2; 1249 static constexpr size_t kRegistersNeededForDouble = 2; 1250 static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyway 1251 static constexpr bool kMultiFPRegistersWidened = false; 1252 static constexpr bool kMultiGPRegistersWidened = false; 1253 static constexpr bool kAlignLongOnStack = false; 1254 static constexpr bool kAlignDoubleOnStack = false; 1255#elif defined(__x86_64__) 1256 static constexpr bool kNativeSoftFloatAbi = false; // This is a hard float ABI. 1257 static constexpr size_t kNumNativeGprArgs = 6; // 6 arguments passed in GPRs. 1258 static constexpr size_t kNumNativeFprArgs = 8; // 8 arguments passed in FPRs.
1259 1260 static constexpr size_t kRegistersNeededForLong = 1; 1261 static constexpr size_t kRegistersNeededForDouble = 1; 1262 static constexpr bool kMultiRegistersAligned = false; 1263 static constexpr bool kMultiFPRegistersWidened = false; 1264 static constexpr bool kMultiGPRegistersWidened = false; 1265 static constexpr bool kAlignLongOnStack = false; 1266 static constexpr bool kAlignDoubleOnStack = false; 1267#else 1268#error "Unsupported architecture" 1269#endif 1270 1271 public: 1272 explicit BuildNativeCallFrameStateMachine(T* delegate) 1273 : gpr_index_(kNumNativeGprArgs), 1274 fpr_index_(kNumNativeFprArgs), 1275 stack_entries_(0), 1276 delegate_(delegate) { 1277 // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff 1278 // the next register is even; counting down is just to make the compiler happy... 1279 static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even"); 1280 static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even"); 1281 } 1282 1283 virtual ~BuildNativeCallFrameStateMachine() {} 1284 1285 bool HavePointerGpr() const { 1286 return gpr_index_ > 0; 1287 } 1288 1289 void AdvancePointer(const void* val) { 1290 if (HavePointerGpr()) { 1291 gpr_index_--; 1292 PushGpr(reinterpret_cast<uintptr_t>(val)); 1293 } else { 1294 stack_entries_++; // TODO: have a field for pointer length as multiple of 32b 1295 PushStack(reinterpret_cast<uintptr_t>(val)); 1296 gpr_index_ = 0; 1297 } 1298 } 1299 1300 bool HaveHandleScopeGpr() const { 1301 return gpr_index_ > 0; 1302 } 1303 1304 void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) { 1305 uintptr_t handle = PushHandle(ptr); 1306 if (HaveHandleScopeGpr()) { 1307 gpr_index_--; 1308 PushGpr(handle); 1309 } else { 1310 stack_entries_++; 1311 PushStack(handle); 1312 gpr_index_ = 0; 1313 } 1314 } 1315 1316 bool HaveIntGpr() const { 1317 return gpr_index_ > 0; 1318 } 1319 1320 void AdvanceInt(uint32_t val) { 1321 if (HaveIntGpr()) { 1322 gpr_index_--; 1323 if (kMultiGPRegistersWidened) { 1324 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1325 PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1326 } else { 1327 PushGpr(val); 1328 } 1329 } else { 1330 stack_entries_++; 1331 if (kMultiGPRegistersWidened) { 1332 DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t)); 1333 PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val))); 1334 } else { 1335 PushStack(val); 1336 } 1337 gpr_index_ = 0; 1338 } 1339 } 1340 1341 bool HaveLongGpr() const { 1342 return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 
1 : 0); 1343 } 1344 1345 bool LongGprNeedsPadding() const { 1346 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1347 kAlignLongOnStack && // and when it needs alignment 1348 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1349 } 1350 1351 bool LongStackNeedsPadding() const { 1352 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1353 kAlignLongOnStack && // and when it needs 8B alignment 1354 (stack_entries_ & 1) == 1; // counter is odd 1355 } 1356 1357 void AdvanceLong(uint64_t val) { 1358 if (HaveLongGpr()) { 1359 if (LongGprNeedsPadding()) { 1360 PushGpr(0); 1361 gpr_index_--; 1362 } 1363 if (kRegistersNeededForLong == 1) { 1364 PushGpr(static_cast<uintptr_t>(val)); 1365 } else { 1366 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1367 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1368 } 1369 gpr_index_ -= kRegistersNeededForLong; 1370 } else { 1371 if (LongStackNeedsPadding()) { 1372 PushStack(0); 1373 stack_entries_++; 1374 } 1375 if (kRegistersNeededForLong == 1) { 1376 PushStack(static_cast<uintptr_t>(val)); 1377 stack_entries_++; 1378 } else { 1379 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1380 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1381 stack_entries_ += 2; 1382 } 1383 gpr_index_ = 0; 1384 } 1385 } 1386 1387 bool HaveFloatFpr() const { 1388 return fpr_index_ > 0; 1389 } 1390 1391 void AdvanceFloat(float val) { 1392 if (kNativeSoftFloatAbi) { 1393 AdvanceInt(bit_cast<uint32_t, float>(val)); 1394 } else { 1395 if (HaveFloatFpr()) { 1396 fpr_index_--; 1397 if (kRegistersNeededForDouble == 1) { 1398 if (kMultiFPRegistersWidened) { 1399 PushFpr8(bit_cast<uint64_t, double>(val)); 1400 } else { 1401 // No widening, just use the bits. 1402 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1403 } 1404 } else { 1405 PushFpr4(val); 1406 } 1407 } else { 1408 stack_entries_++; 1409 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1410 // Need to widen before storing: Note the "double" in the template instantiation. 1411 // Note: We need to jump through those hoops to make the compiler happy. 1412 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1413 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1414 } else { 1415 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1416 } 1417 fpr_index_ = 0; 1418 } 1419 } 1420 } 1421 1422 bool HaveDoubleFpr() const { 1423 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 
1 : 0); 1424 } 1425 1426 bool DoubleFprNeedsPadding() const { 1427 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1428 kAlignDoubleOnStack && // and when it needs alignment 1429 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1430 } 1431 1432 bool DoubleStackNeedsPadding() const { 1433 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1434 kAlignDoubleOnStack && // and when it needs 8B alignment 1435 (stack_entries_ & 1) == 1; // counter is odd 1436 } 1437 1438 void AdvanceDouble(uint64_t val) { 1439 if (kNativeSoftFloatAbi) { 1440 AdvanceLong(val); 1441 } else { 1442 if (HaveDoubleFpr()) { 1443 if (DoubleFprNeedsPadding()) { 1444 PushFpr4(0); 1445 fpr_index_--; 1446 } 1447 PushFpr8(val); 1448 fpr_index_ -= kRegistersNeededForDouble; 1449 } else { 1450 if (DoubleStackNeedsPadding()) { 1451 PushStack(0); 1452 stack_entries_++; 1453 } 1454 if (kRegistersNeededForDouble == 1) { 1455 PushStack(static_cast<uintptr_t>(val)); 1456 stack_entries_++; 1457 } else { 1458 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1459 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1460 stack_entries_ += 2; 1461 } 1462 fpr_index_ = 0; 1463 } 1464 } 1465 } 1466 1467 uint32_t GetStackEntries() const { 1468 return stack_entries_; 1469 } 1470 1471 uint32_t GetNumberOfUsedGprs() const { 1472 return kNumNativeGprArgs - gpr_index_; 1473 } 1474 1475 uint32_t GetNumberOfUsedFprs() const { 1476 return kNumNativeFprArgs - fpr_index_; 1477 } 1478 1479 private: 1480 void PushGpr(uintptr_t val) { 1481 delegate_->PushGpr(val); 1482 } 1483 void PushFpr4(float val) { 1484 delegate_->PushFpr4(val); 1485 } 1486 void PushFpr8(uint64_t val) { 1487 delegate_->PushFpr8(val); 1488 } 1489 void PushStack(uintptr_t val) { 1490 delegate_->PushStack(val); 1491 } 1492 uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) { 1493 return delegate_->PushHandle(ref); 1494 } 1495 1496 uint32_t gpr_index_; // Number of free GPRs 1497 uint32_t fpr_index_; // Number of free FPRs 1498 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1499 // extended 1500 T* const delegate_; // What Push implementation gets called 1501}; 1502 1503// Computes the sizes of register stacks and call stack area. Handling of references can be extended 1504// in subclasses. 1505// 1506// To handle native pointers, use "L" in the shorty for an object reference, which simulates 1507// them with handles. 1508class ComputeNativeCallFrameSize { 1509 public: 1510 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1511 1512 virtual ~ComputeNativeCallFrameSize() {} 1513 1514 uint32_t GetStackSize() const { 1515 return num_stack_entries_ * sizeof(uintptr_t); 1516 } 1517 1518 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1519 sp8 -= GetStackSize(); 1520 // Align by kStackAlignment. 
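    // For illustration (hypothetical values): with the usual 16-byte kStackAlignment, an sp8 of
    // 0x7ffc0000005c would be rounded down to 0x7ffc00000050 by the statement below.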
1521 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1522 return sp8; 1523 } 1524 1525 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) 1526 const { 1527 // Assumption: FPR slots are pointer-sized. That is fine right now, as 32-bit ARM uses the soft-float native path here (no FPR args). 1528 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs; 1529 sp8 -= fregs * sizeof(uintptr_t); 1530 *start_fpr = reinterpret_cast<uint32_t*>(sp8); 1531 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs; 1532 sp8 -= iregs * sizeof(uintptr_t); 1533 *start_gpr = reinterpret_cast<uintptr_t*>(sp8); 1534 return sp8; 1535 } 1536 1537 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr, 1538 uint32_t** start_fpr) const { 1539 // Native call stack. 1540 sp8 = LayoutCallStack(sp8); 1541 *start_stack = reinterpret_cast<uintptr_t*>(sp8); 1542 1543 // Put fprs and gprs below. 1544 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr); 1545 1546 // Return the new bottom. 1547 return sp8; 1548 } 1549 1550 virtual void WalkHeader( 1551 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED) 1552 SHARED_REQUIRES(Locks::mutator_lock_) { 1553 } 1554 1555 void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) { 1556 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); 1557 1558 WalkHeader(&sm); 1559 1560 for (uint32_t i = 1; i < shorty_len; ++i) { 1561 Primitive::Type cur_type_ = Primitive::GetType(shorty[i]); 1562 switch (cur_type_) { 1563 case Primitive::kPrimNot: 1564 // TODO: fix abuse of mirror types. 1565 sm.AdvanceHandleScope( 1566 reinterpret_cast<mirror::Object*>(0x12345678)); 1567 break; 1568 1569 case Primitive::kPrimBoolean: 1570 case Primitive::kPrimByte: 1571 case Primitive::kPrimChar: 1572 case Primitive::kPrimShort: 1573 case Primitive::kPrimInt: 1574 sm.AdvanceInt(0); 1575 break; 1576 case Primitive::kPrimFloat: 1577 sm.AdvanceFloat(0); 1578 break; 1579 case Primitive::kPrimDouble: 1580 sm.AdvanceDouble(0); 1581 break; 1582 case Primitive::kPrimLong: 1583 sm.AdvanceLong(0); 1584 break; 1585 default: 1586 LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty; 1587 UNREACHABLE(); 1588 } 1589 } 1590 1591 num_stack_entries_ = sm.GetStackEntries(); 1592 } 1593 1594 void PushGpr(uintptr_t /* val */) { 1595 // not optimizing registers, yet 1596 } 1597 1598 void PushFpr4(float /* val */) { 1599 // not optimizing registers, yet 1600 } 1601 1602 void PushFpr8(uint64_t /* val */) { 1603 // not optimizing registers, yet 1604 } 1605 1606 void PushStack(uintptr_t /* val */) { 1607 // counting is already done by the state machine 1608 } 1609 1610 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) { 1611 return reinterpret_cast<uintptr_t>(nullptr); 1612 } 1613 1614 protected: 1615 uint32_t num_stack_entries_; 1616}; 1617 1618class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { 1619 public: 1620 ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {} 1621 1622 // Lays out the callee-save frame. Assumes that the not-yet-fixed-up frame corresponding to RefsAndArgs 1623 // is at *m = sp. Will update *m to point to the bottom of the fixed-up save frame. 1624 // 1625 // Note: assumes Walk() has been run before, so that num_handle_scope_references_ is known.
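  //
  // The fixed-up frame looks roughly like this (sketch only, higher addresses on top; exact
  // sizes depend on the ISA and on num_handle_scope_references_):
  //   | RefsAndArgs callee saves |
  //   | (old Method* slot and    |
  //   |  alignment padding)      |
  //   | HandleScope              |  <- *handle_scope
  //   | ArtMethod*               |  <- new *m, aligned to kStackAlignment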
1626 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1627 SHARED_REQUIRES(Locks::mutator_lock_) { 1628 ArtMethod* method = **m; 1629 1630 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); 1631 1632 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1633 1634 // First, fix up the layout of the callee-save frame. 1635 // We have to squeeze in the HandleScope, and relocate the method pointer. 1636 1637 // "Free" the slot for the method. 1638 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 1639 1640 // Under the callee saves put handle scope and new method stack reference. 1641 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 1642 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 1643 1644 sp8 -= scope_and_method; 1645 // Align by kStackAlignment. 1646 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1647 1648 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 1649 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 1650 num_handle_scope_references_); 1651 1652 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 1653 uint8_t* method_pointer = sp8; 1654 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 1655 *new_method_ref = method; 1656 *m = new_method_ref; 1657 } 1658 1659 // Adds space for the cookie. Note: may leave stack unaligned. 1660 void LayoutCookie(uint8_t** sp) const { 1661 // Reference cookie and padding 1662 *sp -= 8; 1663 } 1664 1665 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 1666 // Returns the new bottom. Note: this may be unaligned. 1667 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1668 SHARED_REQUIRES(Locks::mutator_lock_) { 1669 // First, fix up the layout of the callee-save frame. 1670 // We have to squeeze in the HandleScope, and relocate the method pointer. 1671 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 1672 1673 // The bottom of the callee-save frame is now where the method is, *m. 1674 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 1675 1676 // Add space for cookie. 1677 LayoutCookie(&sp8); 1678 1679 return sp8; 1680 } 1681 1682 // WARNING: After this, *sp won't be pointing to the method anymore! 1683 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 1684 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 1685 uint32_t** start_fpr) 1686 SHARED_REQUIRES(Locks::mutator_lock_) { 1687 Walk(shorty, shorty_len); 1688 1689 // JNI part. 1690 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 1691 1692 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 1693 1694 // Return the new bottom. 1695 return sp8; 1696 } 1697 1698 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE; 1699 1700 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
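  // For example (hypothetical shorty, for illustration only): walking a native method with
  // shorty "VILD" produces the call sequence
  //   AdvancePointer(JNIEnv*), AdvanceHandleScope(this/class),   // WalkHeader()
  //   AdvanceInt(0), AdvanceHandleScope(...), AdvanceDouble(0)   // Walk() over "ILD"
  // which is what sizes the GPR/FPR/stack areas used by ComputeLayout().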
1701 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE 1702 SHARED_REQUIRES(Locks::mutator_lock_); 1703 1704 private: 1705 uint32_t num_handle_scope_references_; 1706}; 1707 1708uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) { 1709 num_handle_scope_references_++; 1710 return reinterpret_cast<uintptr_t>(nullptr); 1711} 1712 1713void ComputeGenericJniFrameSize::WalkHeader( 1714 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) { 1715 // JNIEnv 1716 sm->AdvancePointer(nullptr); 1717 1718 // Class object or this as first argument 1719 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678)); 1720} 1721 1722// Class to push values to three separate regions. Used to fill the native call part. Adheres to 1723// the template requirements of BuildNativeCallFrameStateMachine. 1724class FillNativeCall { 1725 public: 1726 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) : 1727 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {} 1728 1729 virtual ~FillNativeCall() {} 1730 1731 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) { 1732 cur_gpr_reg_ = gpr_regs; 1733 cur_fpr_reg_ = fpr_regs; 1734 cur_stack_arg_ = stack_args; 1735 } 1736 1737 void PushGpr(uintptr_t val) { 1738 *cur_gpr_reg_ = val; 1739 cur_gpr_reg_++; 1740 } 1741 1742 void PushFpr4(float val) { 1743 *cur_fpr_reg_ = val; 1744 cur_fpr_reg_++; 1745 } 1746 1747 void PushFpr8(uint64_t val) { 1748 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_); 1749 *tmp = val; 1750 cur_fpr_reg_ += 2; 1751 } 1752 1753 void PushStack(uintptr_t val) { 1754 *cur_stack_arg_ = val; 1755 cur_stack_arg_++; 1756 } 1757 1758 virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) { 1759 LOG(FATAL) << "(Non-JNI) Native call does not use handles."; 1760 UNREACHABLE(); 1761 } 1762 1763 private: 1764 uintptr_t* cur_gpr_reg_; 1765 uint32_t* cur_fpr_reg_; 1766 uintptr_t* cur_stack_arg_; 1767}; 1768 1769// Visits arguments on the stack placing them into a region lower down the stack for the benefit 1770// of transitioning into native code.
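// Typical use, condensed from artQuickGenericJniTrampoline() further below (sketch only, error
// handling omitted):
//   BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
//   visitor.VisitArguments();           // Fill GPR/FPR/stack areas and the HandleScope.
//   visitor.FinalizeHandleScope(self);  // Null out unused slots and install the scope.
//   void* bottom_of_used_area = visitor.GetBottomOfUsedArea();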
1771class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { 1772 public: 1773 BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len, 1774 ArtMethod*** sp) 1775 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 1776 jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) { 1777 ComputeGenericJniFrameSize fsc; 1778 uintptr_t* start_gpr_reg; 1779 uint32_t* start_fpr_reg; 1780 uintptr_t* start_stack_arg; 1781 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 1782 &handle_scope_, 1783 &start_stack_arg, 1784 &start_gpr_reg, &start_fpr_reg); 1785 1786 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 1787 1788 // jni environment is always first argument 1789 sm_.AdvancePointer(self->GetJniEnv()); 1790 1791 if (is_static) { 1792 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); 1793 } 1794 } 1795 1796 void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; 1797 1798 void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); 1799 1800 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 1801 return handle_scope_->GetHandle(0).GetReference(); 1802 } 1803 1804 jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) { 1805 return handle_scope_->GetHandle(0).ToJObject(); 1806 } 1807 1808 void* GetBottomOfUsedArea() const { 1809 return bottom_of_used_area_; 1810 } 1811 1812 private: 1813 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 1814 class FillJniCall FINAL : public FillNativeCall { 1815 public: 1816 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 1817 HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args), 1818 handle_scope_(handle_scope), cur_entry_(0) {} 1819 1820 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); 1821 1822 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 1823 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 1824 handle_scope_ = scope; 1825 cur_entry_ = 0U; 1826 } 1827 1828 void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) { 1829 // Initialize padding entries. 
1830 size_t expected_slots = handle_scope_->NumberOfReferences(); 1831 while (cur_entry_ < expected_slots) { 1832 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr); 1833 } 1834 DCHECK_NE(cur_entry_, 0U); 1835 } 1836 1837 private: 1838 HandleScope* handle_scope_; 1839 size_t cur_entry_; 1840 }; 1841 1842 HandleScope* handle_scope_; 1843 FillJniCall jni_call_; 1844 void* bottom_of_used_area_; 1845 1846 BuildNativeCallFrameStateMachine<FillJniCall> sm_; 1847 1848 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor); 1849}; 1850 1851uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) { 1852 uintptr_t tmp; 1853 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_); 1854 h.Assign(ref); 1855 tmp = reinterpret_cast<uintptr_t>(h.ToJObject()); 1856 cur_entry_++; 1857 return tmp; 1858} 1859 1860void BuildGenericJniFrameVisitor::Visit() { 1861 Primitive::Type type = GetParamPrimitiveType(); 1862 switch (type) { 1863 case Primitive::kPrimLong: { 1864 jlong long_arg; 1865 if (IsSplitLongOrDouble()) { 1866 long_arg = ReadSplitLongParam(); 1867 } else { 1868 long_arg = *reinterpret_cast<jlong*>(GetParamAddress()); 1869 } 1870 sm_.AdvanceLong(long_arg); 1871 break; 1872 } 1873 case Primitive::kPrimDouble: { 1874 uint64_t double_arg; 1875 if (IsSplitLongOrDouble()) { 1876 // Read the raw bits so that we don't cast to a double. 1877 double_arg = ReadSplitLongParam(); 1878 } else { 1879 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress()); 1880 } 1881 sm_.AdvanceDouble(double_arg); 1882 break; 1883 } 1884 case Primitive::kPrimNot: { 1885 StackReference<mirror::Object>* stack_ref = 1886 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress()); 1887 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr()); 1888 break; 1889 } 1890 case Primitive::kPrimFloat: 1891 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress())); 1892 break; 1893 case Primitive::kPrimBoolean: // Fall-through. 1894 case Primitive::kPrimByte: // Fall-through. 1895 case Primitive::kPrimChar: // Fall-through. 1896 case Primitive::kPrimShort: // Fall-through. 1897 case Primitive::kPrimInt: // Fall-through. 1898 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress())); 1899 break; 1900 case Primitive::kPrimVoid: 1901 LOG(FATAL) << "UNREACHABLE"; 1902 UNREACHABLE(); 1903 } 1904} 1905 1906void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) { 1907 // Clear out rest of the scope. 1908 jni_call_.ResetRemainingScopeSlots(); 1909 // Install HandleScope. 1910 self->PushHandleScope(handle_scope_); 1911} 1912 1913#if defined(__arm__) || defined(__aarch64__) 1914extern "C" void* artFindNativeMethod(); 1915#else 1916extern "C" void* artFindNativeMethod(Thread* self); 1917#endif 1918 1919uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) { 1920 if (lock != nullptr) { 1921 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self)); 1922 } else { 1923 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self)); 1924 } 1925} 1926 1927void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) { 1928 if (lock != nullptr) { 1929 JniMethodEndSynchronized(cookie, lock, self); 1930 } else { 1931 JniMethodEnd(cookie, self); 1932 } 1933} 1934 1935/* 1936 * Initializes an alloca region assumed to be directly below sp for a native call: 1937 * Creates a HandleScope and call stack, and fills a mini stack with values to be pushed to registers.
1938 * The final element on the stack is a pointer to the native code. 1939 * 1940 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it. 1941 * We need to fix this, as the handle scope needs to go into the callee-save frame. 1942 * 1943 * The TwoWordReturn of this function holds: 1944 * 1) on success, the native code to invoke and the bottom of the used alloca area. 1945 * 2) on error, the two-word failure value, with an exception pending on the thread. 1946 */ 1947extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) 1948 SHARED_REQUIRES(Locks::mutator_lock_) { 1949 ArtMethod* called = *sp; 1950 DCHECK(called->IsNative()) << PrettyMethod(called, true); 1951 uint32_t shorty_len = 0; 1952 const char* shorty = called->GetShorty(&shorty_len); 1953 1954 // Run the visitor and update sp. 1955 BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp); 1956 visitor.VisitArguments(); 1957 visitor.FinalizeHandleScope(self); 1958 1959 // Fix up managed-stack things in Thread. 1960 self->SetTopOfStack(sp); 1961 1962 self->VerifyStack(); 1963 1964 // Start JNI, save the cookie. 1965 uint32_t cookie; 1966 if (called->IsSynchronized()) { 1967 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self); 1968 if (self->IsExceptionPending()) { 1969 self->PopHandleScope(); 1970 // The two-word failure value denotes an error. 1971 return GetTwoWordFailureValue(); 1972 } 1973 } else { 1974 cookie = JniMethodStart(self); 1975 } 1976 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 1977 *(sp32 - 1) = cookie; 1978 1979 // Retrieve the stored native code. 1980 void* nativeCode = called->GetEntryPointFromJni(); 1981 1982 // There are two cases for the content of nativeCode: 1983 // 1) Pointer to the native function. 1984 // 2) Pointer to the trampoline for native code binding. 1985 // In the second case, we need to execute the binding and continue with the actual native function 1986 // pointer. 1987 DCHECK(nativeCode != nullptr); 1988 if (nativeCode == GetJniDlsymLookupStub()) { 1989#if defined(__arm__) || defined(__aarch64__) 1990 nativeCode = artFindNativeMethod(); 1991#else 1992 nativeCode = artFindNativeMethod(self); 1993#endif 1994 1995 if (nativeCode == nullptr) { 1996 DCHECK(self->IsExceptionPending()); // There should be an exception pending now. 1997 1998 // End JNI, as the assembly will move to deliver the exception. 1999 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr; 2000 if (shorty[0] == 'L') { 2001 artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock); 2002 } else { 2003 artQuickGenericJniEndJNINonRef(self, cookie, lock); 2004 } 2005 2006 return GetTwoWordFailureValue(); 2007 } 2008 // Note that the native code pointer will be automatically set by artFindNativeMethod(). 2009 } 2010 2011 // Return native code addr(lo) and bottom of alloca address(hi). 2012 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()), 2013 reinterpret_cast<uintptr_t>(nativeCode)); 2014} 2015 2016// Defined in quick_jni_entrypoints.cc. 2017extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie, 2018 jvalue result, uint64_t result_f, ArtMethod* called, 2019 HandleScope* handle_scope); 2020/* 2021 * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and 2022 * unlocking.
2023 */ 2024extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, 2025 jvalue result, 2026 uint64_t result_f) { 2027 // We're here just back from a native call. We don't have the shared mutator lock at this point 2028 // yet until we call GoToRunnable() later in GenericJniMethodEnd(). Accessing objects or doing 2029 // anything that requires a mutator lock before that would cause problems as GC may have the 2030 // exclusive mutator lock and may be moving objects, etc. 2031 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); 2032 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); 2033 ArtMethod* called = *sp; 2034 uint32_t cookie = *(sp32 - 1); 2035 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp)); 2036 return GenericJniMethodEnd(self, cookie, result, result_f, called, table); 2037} 2038 2039// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value 2040// for the method pointer. 2041// 2042// It is valid to use this, as at the usage points here (returns from C functions) we are assuming 2043// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations). 2044 2045template<InvokeType type, bool access_check> 2046static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self, 2047 ArtMethod** sp) { 2048 ScopedQuickEntrypointChecks sqec(self); 2049 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)); 2050 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2051 ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); 2052 if (UNLIKELY(method == nullptr)) { 2053 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); 2054 uint32_t shorty_len; 2055 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len); 2056 { 2057 // Remember the args in case a GC happens in FindMethodFromCode. 2058 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2059 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); 2060 visitor.VisitArguments(); 2061 method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method, 2062 self); 2063 visitor.FixupReferences(); 2064 } 2065 2066 if (UNLIKELY(method == nullptr)) { 2067 CHECK(self->IsExceptionPending()); 2068 return GetTwoWordFailureValue(); // Failure. 2069 } 2070 } 2071 DCHECK(!self->IsExceptionPending()); 2072 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2073 2074 // When we return, the caller will branch to this address, so it had better not be 0! 2075 DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method) 2076 << " location: " 2077 << method->GetDexFile()->GetLocation(); 2078 2079 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2080 reinterpret_cast<uintptr_t>(method)); 2081} 2082 2083// Explicit artInvokeCommon template function declarations to please analysis tool. 
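// Each use of the macro below expands to one explicit instantiation; for example,
// EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false) becomes (roughly):
//   template SHARED_REQUIRES(Locks::mutator_lock_)
//   TwoWordReturn artInvokeCommon<kVirtual, false>(
//       uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp);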
2084#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ 2085 template SHARED_REQUIRES(Locks::mutator_lock_) \ 2086 TwoWordReturn artInvokeCommon<type, access_check>( \ 2087 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2088 2089EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); 2090EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); 2091EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false); 2092EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true); 2093EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false); 2094EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true); 2095EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false); 2096EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true); 2097EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false); 2098EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); 2099#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL 2100 2101// See comments in runtime_support_asm.S 2102extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( 2103 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2104 SHARED_REQUIRES(Locks::mutator_lock_) { 2105 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); 2106} 2107 2108extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( 2109 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2110 SHARED_REQUIRES(Locks::mutator_lock_) { 2111 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); 2112} 2113 2114extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( 2115 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2116 SHARED_REQUIRES(Locks::mutator_lock_) { 2117 return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp); 2118} 2119 2120extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( 2121 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2122 SHARED_REQUIRES(Locks::mutator_lock_) { 2123 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); 2124} 2125 2126extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( 2127 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2128 SHARED_REQUIRES(Locks::mutator_lock_) { 2129 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); 2130} 2131 2132// Determine target of interface dispatch. This object is known non-null. First argument 2133// is there for consistency but should not be used, as some architectures overwrite it 2134// in the assembly trampoline. 2135extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED, 2136 mirror::Object* this_object, 2137 Thread* self, 2138 ArtMethod** sp) 2139 SHARED_REQUIRES(Locks::mutator_lock_) { 2140 ScopedQuickEntrypointChecks sqec(self); 2141 StackHandleScope<1> hs(self); 2142 Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass())); 2143 2144 // The optimizing compiler currently does not inline methods that have an interface 2145 // invocation. We use the outer method directly to avoid fetching a stack map, which is 2146 // more expensive. 2147 ArtMethod* caller_method = QuickArgumentVisitor::GetOuterMethod(sp); 2148 DCHECK_EQ(caller_method, QuickArgumentVisitor::GetCallingMethod(sp)); 2149 2150 // Fetch the dex_method_idx of the target interface method from the caller. 
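    // For example (hypothetical bytecode, for illustration only): at a call site such as
    //   invoke-interface {v1, v2}, LIface;->foo(I)V  // method@000b
    // the instruction is in 35c format and VRegB_35c() below yields 0x000b; the /range form
    // (3rc) carries the method index in vB as well.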
2151 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp); 2152 2153 const DexFile::CodeItem* code_item = caller_method->GetCodeItem(); 2154 CHECK_LT(dex_pc, code_item->insns_size_in_code_units_); 2155 const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]); 2156 Instruction::Code instr_code = instr->Opcode(); 2157 CHECK(instr_code == Instruction::INVOKE_INTERFACE || 2158 instr_code == Instruction::INVOKE_INTERFACE_RANGE) 2159 << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr); 2160 uint32_t dex_method_idx; 2161 if (instr_code == Instruction::INVOKE_INTERFACE) { 2162 dex_method_idx = instr->VRegB_35c(); 2163 } else { 2164 CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE); 2165 dex_method_idx = instr->VRegB_3rc(); 2166 } 2167 2168 ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod( 2169 dex_method_idx, sizeof(void*)); 2170 DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method); 2171 ArtMethod* method = nullptr; 2172 ImTable* imt = cls->GetImt(sizeof(void*)); 2173 2174 if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) { 2175 // If the dex cache already resolved the interface method, look whether we have 2176 // a match in the ImtConflictTable. 2177 uint32_t imt_index = interface_method->GetDexMethodIndex(); 2178 ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*)); 2179 if (LIKELY(conflict_method->IsRuntimeMethod())) { 2180 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); 2181 DCHECK(current_table != nullptr); 2182 method = current_table->Lookup(interface_method, sizeof(void*)); 2183 } else { 2184 // It seems we aren't really a conflict method! 2185 method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*)); 2186 } 2187 if (method != nullptr) { 2188 return GetTwoWordSuccessValue( 2189 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()), 2190 reinterpret_cast<uintptr_t>(method)); 2191 } 2192 2193 // No match, use the IfTable. 2194 method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*)); 2195 if (UNLIKELY(method == nullptr)) { 2196 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( 2197 interface_method, this_object, caller_method); 2198 return GetTwoWordFailureValue(); // Failure. 2199 } 2200 } else { 2201 // The dex cache did not resolve the method, look it up in the dex file 2202 // of the caller, 2203 DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod()); 2204 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache() 2205 ->GetDexFile(); 2206 uint32_t shorty_len; 2207 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), 2208 &shorty_len); 2209 { 2210 // Remember the args in case a GC happens in FindMethodFromCode. 2211 ScopedObjectAccessUnchecked soa(self->GetJniEnv()); 2212 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); 2213 visitor.VisitArguments(); 2214 method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method, 2215 self); 2216 visitor.FixupReferences(); 2217 } 2218 2219 if (UNLIKELY(method == nullptr)) { 2220 CHECK(self->IsExceptionPending()); 2221 return GetTwoWordFailureValue(); // Failure. 
2222 } 2223 interface_method = caller_method->GetDexCacheResolvedMethod(dex_method_idx, sizeof(void*)); 2224 DCHECK(!interface_method->IsRuntimeMethod()); 2225 } 2226 2227 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2228 // We create a new table with the new pair { interface_method, method }. 2229 uint32_t imt_index = interface_method->GetDexMethodIndex(); 2230 ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*)); 2231 if (conflict_method->IsRuntimeMethod()) { 2232 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2233 cls.Get(), 2234 conflict_method, 2235 interface_method, 2236 method, 2237 /*force_new_conflict_method*/false); 2238 if (new_conflict_method != conflict_method) { 2239 // Update the IMT if we create a new conflict method. No fence needed here, as the 2240 // data is consistent. 2241 imt->Set(imt_index % ImTable::kSize, 2242 new_conflict_method, 2243 sizeof(void*)); 2244 } 2245 } 2246 2247 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2248 2249 // When we return, the caller will branch to this address, so it had better not be 0! 2250 DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method) 2251 << " location: " << method->GetDexFile()->GetLocation(); 2252 2253 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2254 reinterpret_cast<uintptr_t>(method)); 2255} 2256 2257} // namespace art 2258
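// A rough sketch (pseudo-code, not part of the runtime; register shuffling and error paths are
// omitted) of how the architecture-specific assembly stub uses the two generic JNI entrypoints
// defined above to bracket a native call:
//
//   TwoWordReturn r = artQuickGenericJniTrampoline(self, sp);  // Builds HandleScope + arg areas.
//   void* native_code = lo(r);                                 // Lo word: code to call.
//   void* bottom = hi(r);                                      // Hi word: bottom of the used alloca area.
//   // Pop the prepared GPR/FPR values (stored starting at bottom) into argument registers,
//   // leave the out-args region as the native call stack, then call native_code.
//   // Afterwards:
//   uint64_t packed_result = artQuickGenericJniEndTrampoline(self, jvalue_result, fp_result);
//   // packed_result carries the (possibly reference-decoded) return value back to managed code.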