quick_trampoline_entrypoints.cc revision 524e7ea8cd17bad17bd9f3e0ccbb19ad0d4d9c02
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "debugger.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // | :          |
  // | X20        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && !defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | F15        |
  // | F14        |    f_arg1
  // | F13        |
  // | F12        |    f_arg0
  // |            |    padding
  // | A0/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = true;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = true;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 2 arguments passed in FPRs. Floats can be
                                                 // passed only in even numbered registers and
                                                 // each double occupies two registers.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 32;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 76;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A7         |    arg7
  // | A6         |    arg6
  // | A5         |    arg5
  // | A4         |    arg4
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | F19        |    f_arg7
  // | F18        |    f_arg6
  // | F17        |    f_arg5
  // | F16        |    f_arg4
  // | F15        |    f_arg3
  // | F14        |    f_arg2
  // | F13        |    f_arg1
  // | F12        |    f_arg0
  // |            |    padding
  // | A0/Method* |  <- sp
  // NOTE: for Mips64, when A0 is skipped, F0 is also skipped.
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = true;

  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | XMM3        |    float arg 4
  // | XMM2        |    float arg 3
  // | XMM1        |    float arg 2
  // | XMM0        |    float arg 1
  // | EAX/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
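  // Illustrative note (not part of the original file), derived from the frame diagram above:
  // counting GPR spill slots up from Gpr1Offset gives RCX(0), RDX(1), RBX(2), RBP(3), RSI(4),
  // R8(5), R9(6). The spill area also holds the callee saves RBX and RBP (slots 2 and 3), which
  // are not argument registers, and RDI holds the Method*; hence managed args arg1..arg5
  // (gpr_index 0..4) land in slots 4, 1, 0, 5, 6 respectively, matching the switch above.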
#else
#error "Unsupported architecture"
#endif

 public:
  // Special handling for proxy methods. Proxy methods are instance methods so the
  // 'this' object is the 1st argument. They also have the same frame layout as the
  // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
  // 1st GPR.
  static mirror::Object* GetProxyThisObject(ArtMethod** sp)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK((*sp)->IsProxyMethod());
    CHECK_GT(kNumQuickGprArgs, 0u);
    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
        GprIndexToGprOffset(kThisGprIndex);
    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
  }

  static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
  }

  static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* previous_sp =
        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<ArtMethod**>(previous_sp);
  }

  static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
    ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
    uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);

    if (current_code->IsOptimized()) {
      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
      StackMapEncoding encoding = code_info.ExtractEncoding();
      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
      DCHECK(stack_map.IsValid());
      if (stack_map.HasInlineInfo(encoding)) {
        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
        return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
      } else {
        return stack_map.GetDexPc(encoding);
      }
    } else {
      return current_code->ToDexPc(*caller_sp, outer_pc);
    }
  }

  // For the given quick ref and args quick frame, return the caller's PC.
  static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + sizeof(ArtMethod*)),  // Skip ArtMethod*.
      gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
      cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, we want to assume that counters (fpr_double_index_) are even if
    // the next register is even.
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
    return *reinterpret_cast<uint64_t*>(stack_args_
        + stack_index_ * kBytesStackArgLocation);
  }

  void IncGprIndex() {
    gpr_index_++;
    if (kGprFprLockstep) {
      fpr_index_++;
    }
  }

  void IncFprIndex() {
    fpr_index_++;
    if (kGprFprLockstep) {
      gpr_index_++;
    }
  }

  void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the first method argument.
    // (b) whatever the argument type is, 'stack_index_' should be advanced
    //     on every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle this.
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        IncGprIndex();
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            IncGprIndex();
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              } else if (kQuickSkipOddFpRegisters) {
                IncFprIndex();
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
              // Currently, this is only for ARM and MIPS, where the first available parameter
              // register is R1 (on ARM) or A1 (on MIPS). So we skip it, and use R2 (on ARM) or
              // A2 (on MIPS) instead.
              IncGprIndex();
            }
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
              // We don't want to split this. Pass over this register.
              gpr_index_++;
              is_split_long_or_double_ = false;
            }
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  IncGprIndex();
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  IncFprIndex();
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;  // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed
  // in terms of singles, may be behind fpr_index.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;  // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};
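
// Illustrative sketch (not part of the original file): a minimal QuickArgumentVisitor
// subclass showing the intended usage pattern -- construct it with the callee-save frame's
// sp, then call VisitArguments(), which invokes Visit() once per argument with the current
// type and spill address available through the accessors. The name CountRefsVisitor and its
// use are hypothetical.
class CountRefsVisitor FINAL : public QuickArgumentVisitor {
 public:
  CountRefsVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), num_refs_(0) {}

  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
    if (IsParamAReference()) {
      // GetParamAddress() would yield the StackReference<mirror::Object>* location here.
      num_refs_++;
    }
  }

  size_t NumRefs() const { return num_refs_; }

 private:
  size_t num_refs_;

  DISALLOW_COPY_AND_ASSIGN(CountRefsVisitor);
};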

// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows using the QuickArgumentVisitor constants without moving all the code into its own
// module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return QuickArgumentVisitor::GetProxyThisObject(sp);
}

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (method->IsAbstract()) {
    ThrowAbstractMethodError(method);
    return 0;
  }

  JValue tmp_value;
  ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
      StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame, false);
  const DexFile::CodeItem* code_item = method->GetCodeItem();
  DCHECK(code_item != nullptr) << PrettyMethod(method);
  ManagedStack fragment;

  DCHECK(!method->IsNative()) << PrettyMethod(method);
  uint32_t shorty_len = 0;
  auto* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);

  JValue result;

  if (deopt_frame != nullptr) {
    // Coming from single-frame deopt.

    if (kIsDebugBuild) {
      // Sanity-check: are the methods as expected? We check that the last shadow frame (the
      // bottom of the call-stack) corresponds to the called method.
      ShadowFrame* linked = deopt_frame;
      while (linked->GetLink() != nullptr) {
        linked = linked->GetLink();
      }
      CHECK_EQ(method, linked->GetMethod()) << PrettyMethod(method) << " "
          << PrettyMethod(linked->GetMethod());
    }

    if (VLOG_IS_ON(deopt)) {
      // Print out the stack to verify that it was a single-frame deopt.
      LOG(INFO) << "Continue-ing from deopt. Stack is:";
      QuickExceptionHandler::DumpFramesWithType(self, true);
    }

    mirror::Throwable* pending_exception = nullptr;
    self->PopDeoptimizationContext(&result, &pending_exception);

    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);

    // Ensure that the stack is still in order.
    if (kIsDebugBuild) {
      class DummyStackVisitor : public StackVisitor {
       public:
        explicit DummyStackVisitor(Thread* self_in) SHARED_REQUIRES(Locks::mutator_lock_)
            : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

        bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
          // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the
          // walking logic. Just always say we want to continue.
          return true;
        }
      };
      DummyStackVisitor dsv(self);
      dsv.WalkStack();
    }

    // Restore the exception that was pending before deoptimization then interpret the
    // deoptimized frames.
    if (pending_exception != nullptr) {
      self->SetException(pending_exception);
    }
    interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, &result);
  } else {
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    uint16_t num_regs = code_item->registers_size_;
    // No last shadow coming from quick.
    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
        CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
    ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    const bool needs_initialization =
        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (needs_initialization) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending())
            << PrettyMethod(shadow_frame->GetMethod());
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
  }

  // Pop transition.
  self->PopManagedStackFragment(fragment);

  // Request a stack deoptimization if needed.
  ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
    // Push the context of the deoptimization stack so we can restore the return value and the
    // exception before executing the deoptimized frames.
    self->PushDeoptimizationContext(result, shorty[0] == 'L', self->GetException());

    // Set special exception to cause deoptimization.
    self->SetException(Thread::GetDeoptimizationException());
  }

  // No need to restore the args since the method has already been run by the interpreter.
  return result.GetJ();
}

// Visits arguments on the stack placing them into the args vector; Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
                            std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:  // Fall-through.
    case Primitive::kPrimChar:  // Fall-through.
    case Primitive::kPrimShort:  // Fall-through.
    case Primitive::kPrimInt:  // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object
// method which is responsible for recording callee save registers. We explicitly place into
// jobjects the incoming reference arguments (so they survive GC). We invoke the invocation
// handler, which is a field within the proxy object, which will box the primitive arguments and
// deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making the stack crawlable.
  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place the arguments into the args vector and remove the receiver.
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
      << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
  DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  self->EndAssertNoThreadSuspension(old_cause);
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(
      mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj,
                                               args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Reads object references held in arguments from quick frames and places them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}
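
// Illustrative sketch (not part of the original file): the visit/fixup protocol followed by
// the GC-aware visitors above. A trampoline that may suspend between reading raw Object*
// arguments and consuming them first pins the references, then repairs the stack copies.
// The function name ExampleUseOfRememberForGc is hypothetical.
static void ExampleUseOfRememberForGc(ArtMethod** sp, bool is_static, const char* shorty,
                                      uint32_t shorty_len, ScopedObjectAccessUnchecked& soa)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  RememberForGcArgumentVisitor visitor(sp, is_static, shorty, shorty_len, &soa);
  visitor.VisitArguments();   // Promote each Object* argument to a JNI local reference.
  // ... code that may allocate or suspend (and thus let a moving GC relocate objects) ...
  visitor.FixupReferences();  // Write the possibly-moved objects back into the frame slots.
}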

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  // The resolution trampoline stashes the resolved method into the callee-save frame to
  // transport it. Thus, when exiting, the stack cannot be verified (as the resolved method most
  // likely does not have the same stack layout as the callee-save method).
  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  InvokeType invoke_type;
  MethodReference called_method(nullptr, 0);
  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
  ArtMethod* caller = nullptr;
  if (!called_method_known_on_entry) {
    caller = QuickArgumentVisitor::GetCallingMethod(sp);
    uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
    const DexFile::CodeItem* code;
    called_method.dex_file = caller->GetDexFile();
    code = caller->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
        UNREACHABLE();
    }
    called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
  } else {
    invoke_type = kStatic;
    called_method.dex_file = called->GetDexFile();
    called_method.dex_method_index = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      called_method.dex_file->GetMethodShorty(
          called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (!called_method_known_on_entry) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
    called = linker->ResolveMethod(self, called_method.dex_method_index, caller, invoke_type);
  }
  const void* code = nullptr;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface) {
      // Refine called method based on receiver.
      CHECK(receiver != nullptr) << invoke_type;

      ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
      } else {
        called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
          << PrettyTypeOf(receiver) << " "
          << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method
      // index of the sharpened method avoiding dirtying the dex cache if possible.
      // Note, called_method.dex_method_index references the dex method before the
      // FindVirtualMethodFor... This is ok for FindDexMethodIndexInOtherDexFile that only cares
      // about the name and signature.
      uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
      if (!called->HasSameDexCacheResolvedMethods(caller, sizeof(void*))) {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(!called_method_known_on_entry);
        DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
        const DexFile* caller_dex_file = called_method.dex_file;
        uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
        update_dex_cache_method_index =
            called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
                                                     caller_method_name_and_sig_index);
      }
      if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
          (caller->GetDexCacheResolvedMethod(
              update_dex_cache_method_index, sizeof(void*)) != called)) {
        caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
      }
    } else if (invoke_type == kStatic) {
      const auto called_dex_method_idx = called->GetDexMethodIndex();
      // For static invokes, we may dispatch to the static method in the superclass but resolve
      // using the subclass. To prevent getting slow paths on each invoke, we force set the
      // resolved method for the super class dex method index if we are in the same dex file.
      // b/19175856
      if (called->GetDexFile() == called_method.dex_file &&
          called_method.dex_method_index != called_dex_method_idx) {
        called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
      }
    }

    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(soa.Self(), called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
        // If the caller is deoptimized (by a breakpoint, for example), we have to
        // continue its execution with interpreter when returning from the called
        // method. Because we do not want to execute the called method with the
        // interpreter, we wrap its execution into the instrumentation stubs.
        // When the called method returns, it will execute the instrumentation
        // exit hook that will determine the need of the interpreter with a call
        // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
        // it is needed.
        code = GetQuickInstrumentationEntryPoint();
      } else {
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else if (called_class->IsInitializing()) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in place
        // until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fixup any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;

  return code;
}

/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
 *    and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR.
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. Is only called if we need
 *                       padding, that is, think the architecture is 32b and aligns 64b.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b, it's the callee's job to
 *                          split this if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
 *                                            called with nullptr, as this might be important for
 *                                            null initialization. Must return the jobject, that
 *                                            is, the reference to the entry in the HandleScope
 *                                            (nullptr if necessary).
 *
 */
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNativeSoftFloatAbi = true;  // Treated as a soft float ABI.
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = true;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__mips__) && defined(__LP64__)
  // Let the code prepare GPRs only and we will load the FPRs with same data.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 8;
  static constexpr size_t kNumNativeFprArgs = 0;

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways.
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() const {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushGpr(val);
      }
    } else {
      stack_entries_++;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushStack(val);
      }
      gpr_index_ = 0;
    }
  }

  bool HaveLongGpr() const {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
  }

  bool LongGprNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs alignment
        (gpr_index_ & 1) == 1;             // counter is odd, see constructor
  }

  bool LongStackNeedsPadding() const {
    return kRegistersNeededForLong > 1 &&  // only pad when using multiple registers
        kAlignLongOnStack &&               // and when it needs 8B alignment
        (stack_entries_ & 1) == 1;         // counter is odd
  }

  void AdvanceLong(uint64_t val) {
    if (HaveLongGpr()) {
      if (LongGprNeedsPadding()) {
        PushGpr(0);
        gpr_index_--;
      }
      if (kRegistersNeededForLong == 1) {
        PushGpr(static_cast<uintptr_t>(val));
      } else {
        PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
      }
      gpr_index_ -= kRegistersNeededForLong;
    } else {
      if (LongStackNeedsPadding()) {
        PushStack(0);
        stack_entries_++;
      }
      if (kRegistersNeededForLong == 1) {
        PushStack(static_cast<uintptr_t>(val));
        stack_entries_++;
      } else {
        PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
        PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
        stack_entries_ += 2;
      }
      gpr_index_ = 0;
    }
  }

  bool HaveFloatFpr() const {
    return fpr_index_ > 0;
  }

  void AdvanceFloat(float val) {
    if (kNativeSoftFloatAbi) {
      AdvanceInt(bit_cast<uint32_t, float>(val));
    } else {
      if (HaveFloatFpr()) {
        fpr_index_--;
        if (kRegistersNeededForDouble == 1) {
          if (kMultiFPRegistersWidened) {
            PushFpr8(bit_cast<uint64_t, double>(val));
          } else {
            // No widening, just use the bits.
            PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
          }
        } else {
          PushFpr4(val);
        }
      } else {
        stack_entries_++;
        if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
          // Need to widen before storing: Note the "double" in the template instantiation.
          // Note: We need to jump through those hoops to make the compiler happy.
          DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
          PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
        } else {
          PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
        }
        fpr_index_ = 0;
      }
    }
  }

  bool HaveDoubleFpr() const {
    return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
  }
        1 : 0);
  }

  bool DoubleFprNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // Only pad when using multiple registers,
        kAlignDoubleOnStack &&               // and when it needs alignment,
        (fpr_index_ & 1) == 1;               // and the counter is odd (see the constructor).
  }

  bool DoubleStackNeedsPadding() const {
    return kRegistersNeededForDouble > 1 &&  // Only pad when using multiple registers,
        kAlignDoubleOnStack &&               // and when it needs 8B alignment,
        (stack_entries_ & 1) == 1;           // and the counter is odd.
  }

  void AdvanceDouble(uint64_t val) {
    if (kNativeSoftFloatAbi) {
      AdvanceLong(val);
    } else {
      if (HaveDoubleFpr()) {
        if (DoubleFprNeedsPadding()) {
          PushFpr4(0);
          fpr_index_--;
        }
        PushFpr8(val);
        fpr_index_ -= kRegistersNeededForDouble;
      } else {
        if (DoubleStackNeedsPadding()) {
          PushStack(0);
          stack_entries_++;
        }
        if (kRegistersNeededForDouble == 1) {
          PushStack(static_cast<uintptr_t>(val));
          stack_entries_++;
        } else {
          PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
          PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
          stack_entries_ += 2;
        }
        fpr_index_ = 0;
      }
    }
  }

  uint32_t GetStackEntries() const {
    return stack_entries_;
  }

  uint32_t GetNumberOfUsedGprs() const {
    return kNumNativeGprArgs - gpr_index_;
  }

  uint32_t GetNumberOfUsedFprs() const {
    return kNumNativeFprArgs - fpr_index_;
  }

 private:
  void PushGpr(uintptr_t val) {
    delegate_->PushGpr(val);
  }
  void PushFpr4(float val) {
    delegate_->PushFpr4(val);
  }
  void PushFpr8(uint64_t val) {
    delegate_->PushFpr8(val);
  }
  void PushStack(uintptr_t val) {
    delegate_->PushStack(val);
  }
  uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
    return delegate_->PushHandle(ref);
  }

  uint32_t gpr_index_;      // Number of free GPRs.
  uint32_t fpr_index_;      // Number of free FPRs.
  uint32_t stack_entries_;  // Stack entries are in multiples of 32 bits, as floats are usually
                            // not extended.
  T* const delegate_;       // Which Push implementation gets called.
};

// Computes the sizes of the register stacks and the call stack area. Handling of references can
// be extended in subclasses.
//
// To handle native pointers, use "L" in the shorty for an object reference, which simulates
// them with handles.
class ComputeNativeCallFrameSize {
 public:
  ComputeNativeCallFrameSize() : num_stack_entries_(0) {}

  virtual ~ComputeNativeCallFrameSize() {}

  uint32_t GetStackSize() const {
    return num_stack_entries_ * sizeof(uintptr_t);
  }

  uint8_t* LayoutCallStack(uint8_t* sp8) const {
    sp8 -= GetStackSize();
    // Align by kStackAlignment.
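    // A worked example with illustrative values only: if kStackAlignment were 16 and
    // GetStackSize() returned 20, an incoming sp8 of 0x1000 would become 0x1000 - 20 = 0xfec,
    // and RoundDown(0xfec, 16) yields 0xfe0. Rounding down can only grow the reserved area,
    // so the outgoing stack arguments always fit above the aligned bottom.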
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
    return sp8;
  }

  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
      const {
    // This reserves one uintptr_t-sized slot per FPR. The sizes work out on all current targets
    // because 32-bit ones use a soft-float native ABI here (fregs == 0, e.g. soft-float ARM)
    // and 64-bit FPR arguments are pointer-sized.
    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
    sp8 -= fregs * sizeof(uintptr_t);
    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
    sp8 -= iregs * sizeof(uintptr_t);
    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
    return sp8;
  }

  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
                            uint32_t** start_fpr) const {
    // Native call stack.
    sp8 = LayoutCallStack(sp8);
    *start_stack = reinterpret_cast<uintptr_t*>(sp8);

    // Put FPRs and GPRs below.
    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  virtual void WalkHeader(
      BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
  }

  void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);

    WalkHeader(&sm);

    for (uint32_t i = 1; i < shorty_len; ++i) {
      Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
          // TODO: fix abuse of mirror types.
          sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
          break;

        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          sm.AdvanceInt(0);
          break;
        case Primitive::kPrimFloat:
          sm.AdvanceFloat(0);
          break;
        case Primitive::kPrimDouble:
          sm.AdvanceDouble(0);
          break;
        case Primitive::kPrimLong:
          sm.AdvanceLong(0);
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
          UNREACHABLE();
      }
    }

    num_stack_entries_ = sm.GetStackEntries();
  }

  void PushGpr(uintptr_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr4(float /* val */) {
    // Not optimizing registers, yet.
  }

  void PushFpr8(uint64_t /* val */) {
    // Not optimizing registers, yet.
  }

  void PushStack(uintptr_t /* val */) {
    // Counting is already done in the state machine.
  }

  virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
    return reinterpret_cast<uintptr_t>(nullptr);
  }

 protected:
  uint32_t num_stack_entries_;
};

class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
 public:
  ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}

  // Lays out the callee-save frame. Assumes that the not-yet-fixed-up frame corresponding to
  // RefsAndArgs is at *m = sp. Will update *m to point to the bottom of the new save frame.
  //
  // Note: assumes Walk() has been run before, so that num_handle_scope_references_ is valid.
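  //
  // A sketch of the fix-up (higher addresses at the top, not to scale):
  //
  //   before:                     after:
  //   | callee saves |            | callee saves |
  //   | Method*      | <- sp      | HandleScope  |
  //                               | Method*      | <- *m, re-aligned down to kStackAlignment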
  void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ArtMethod* method = **m;

    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));

    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);

    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.

    // "Free" the slot for the method.
    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.

    // Under the callee saves, put the handle scope and the new method stack reference.
    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
    size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);

    sp8 -= scope_and_method;
    // Align by kStackAlignment.
    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));

    uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
                                        num_handle_scope_references_);

    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
    uint8_t* method_pointer = sp8;
    auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
    *new_method_ref = method;
    *m = new_method_ref;
  }

  // Adds space for the cookie. Note: may leave the stack unaligned.
  void LayoutCookie(uint8_t** sp) const {
    // Reference cookie and padding.
    *sp -= 8;
  }

  // Re-layout the callee-save frame (insert a handle scope). Then add space for the cookie.
  // Returns the new bottom. Note: this may be unaligned.
  uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // First, fix up the layout of the callee-save frame.
    // We have to squeeze in the HandleScope, and relocate the method pointer.
    LayoutCalleeSaveFrame(self, m, sp, handle_scope);

    // The bottom of the callee-save frame is now where the method is, *m.
    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);

    // Add space for the cookie.
    LayoutCookie(&sp8);

    return sp8;
  }

  // WARNING: After this, *sp won't be pointing to the method anymore!
  uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
                         HandleScope** handle_scope, uintptr_t** start_stack,
                         uintptr_t** start_gpr, uint32_t** start_fpr)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    Walk(shorty, shorty_len);

    // JNI part.
    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);

    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);

    // Return the new bottom.
    return sp8;
  }

  uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;

  // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
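  //
  // For example (hypothetical method, standard JNI rules): an instance method with shorty
  // "ILJ" (returns int; takes an object and a long) is walked as if its native signature were
  //
  //   jint fn(JNIEnv*, jobject /* this */, jobject, jlong);
  //
  // WalkHeader() accounts for the two leading arguments; Walk() handles the rest.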
  void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);

 private:
  uint32_t num_handle_scope_references_;
};

uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
  num_handle_scope_references_++;
  return reinterpret_cast<uintptr_t>(nullptr);
}

void ComputeGenericJniFrameSize::WalkHeader(
    BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
  // JNIEnv*.
  sm->AdvancePointer(nullptr);

  // Class object or "this" as the first argument.
  sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
}

// Class to push values to three separate regions. Used to fill the native call part. Adheres to
// the template requirements of BuildNativeCallFrameStateMachine.
class FillNativeCall {
 public:
  FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
      cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}

  virtual ~FillNativeCall() {}

  void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
    cur_gpr_reg_ = gpr_regs;
    cur_fpr_reg_ = fpr_regs;
    cur_stack_arg_ = stack_args;
  }

  void PushGpr(uintptr_t val) {
    *cur_gpr_reg_ = val;
    cur_gpr_reg_++;
  }

  void PushFpr4(float val) {
    // Store the raw bits: the slot is later reloaded into an FPR, so a numeric
    // float-to-integer conversion here would corrupt the value.
    *cur_fpr_reg_ = bit_cast<uint32_t, float>(val);
    cur_fpr_reg_++;
  }

  void PushFpr8(uint64_t val) {
    uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
    *tmp = val;
    cur_fpr_reg_ += 2;
  }

  void PushStack(uintptr_t val) {
    *cur_stack_arg_ = val;
    cur_stack_arg_++;
  }

  virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
    UNREACHABLE();
  }

 private:
  uintptr_t* cur_gpr_reg_;
  uint32_t* cur_fpr_reg_;
  uintptr_t* cur_stack_arg_;
};

// Visits arguments on the stack, placing them into a region lower down the stack for the benefit
// of transitioning into native code.
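// This is the second of two walks over the arguments: ComputeGenericJniFrameSize first sizes
// everything (its Push* methods discard the values), then this visitor replays the same walk
// to actually store each argument. Typical use, as in artQuickGenericJniTrampoline() below:
//
//   BuildGenericJniFrameVisitor visitor(self, is_static, shorty, shorty_len, &sp);
//   visitor.VisitArguments();           // Fills the register/stack copies and the HandleScope.
//   visitor.FinalizeHandleScope(self);  // Nulls unused slots and pushes the scope.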
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty,
                              uint32_t shorty_len, ArtMethod*** sp)
      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
        jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
    ComputeGenericJniFrameSize fsc;
    uintptr_t* start_gpr_reg;
    uint32_t* start_fpr_reg;
    uintptr_t* start_stack_arg;
    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
                                             &handle_scope_,
                                             &start_stack_arg,
                                             &start_gpr_reg, &start_fpr_reg);

    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);

    // The JNI environment is always the first argument.
    sm_.AdvancePointer(self->GetJniEnv());

    if (is_static) {
      // For static methods, the declaring class comes next.
      sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
    }
  }

  void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;

  void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);

  StackReference<mirror::Object>* GetFirstHandleScopeEntry()
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).GetReference();
  }

  jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
    return handle_scope_->GetHandle(0).ToJObject();
  }

  void* GetBottomOfUsedArea() const {
    return bottom_of_used_area_;
  }

 private:
  // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
  class FillJniCall FINAL : public FillNativeCall {
   public:
    FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
                HandleScope* handle_scope)
        : FillNativeCall(gpr_regs, fpr_regs, stack_args),
          handle_scope_(handle_scope), cur_entry_(0) {}

    uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);

    void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
               HandleScope* scope) {
      FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
      handle_scope_ = scope;
      cur_entry_ = 0U;
    }

    void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
      // Initialize padding entries.
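      // Once the scope is pushed, the GC will visit all NumberOfReferences() slots, so any
      // slot the argument walk did not fill must be cleared rather than left as stack garbage.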
      size_t expected_slots = handle_scope_->NumberOfReferences();
      while (cur_entry_ < expected_slots) {
        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
      }
      // There is always at least one handle: the jclass or "this".
      DCHECK_NE(cur_entry_, 0U);
    }

   private:
    HandleScope* handle_scope_;
    size_t cur_entry_;
  };

  HandleScope* handle_scope_;
  FillJniCall jni_call_;
  void* bottom_of_used_area_;

  BuildNativeCallFrameStateMachine<FillJniCall> sm_;

  DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};

uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
  uintptr_t tmp;
  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
  h.Assign(ref);
  tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
  cur_entry_++;
  return tmp;
}

void BuildGenericJniFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong: {
      jlong long_arg;
      if (IsSplitLongOrDouble()) {
        long_arg = ReadSplitLongParam();
      } else {
        long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      sm_.AdvanceLong(long_arg);
      break;
    }
    case Primitive::kPrimDouble: {
      uint64_t double_arg;
      if (IsSplitLongOrDouble()) {
        // Read into a 64-bit integer so that we don't cast to a double.
        double_arg = ReadSplitLongParam();
      } else {
        double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
      }
      sm_.AdvanceDouble(double_arg);
      break;
    }
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
      break;
    }
    case Primitive::kPrimFloat:
      sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
      sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
  // Clear out the rest of the scope.
  jni_call_.ResetRemainingScopeSlots();
  // Install the HandleScope.
  self->PushHandleScope(handle_scope_);
}

#if defined(__arm__) || defined(__aarch64__)
extern "C" void* artFindNativeMethod();
#else
extern "C" void* artFindNativeMethod(Thread* self);
#endif

uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
  if (lock != nullptr) {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock,
                                                                            self));
  } else {
    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
  }
}

void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
  if (lock != nullptr) {
    JniMethodEndSynchronized(cookie, lock, self);
  } else {
    JniMethodEnd(cookie, self);
  }
}

/*
 * Initializes an alloca region assumed to be directly below sp for a native call:
 * Creates a HandleScope and a call stack, and fills a mini stack with values to be pushed
 * to registers.
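 *
 * Roughly, once this has run, the area at and below the fixed-up callee-save frame looks like
 * this (higher addresses at the top; a sketch, not to scale):
 *
 *   | callee saves        |
 *   | HandleScope         |
 *   | ArtMethod*          |  <- new top quick frame
 *   | cookie (+ padding)  |
 *   | outgoing stack args |
 *   | FPR arg copies      |
 *   | GPR arg copies      |  <- bottom of the used area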
 * The native code pointer and the bottom of this area are handed back to the assembly stub
 * as a two-word return value (see below).
 *
 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
 * We need to fix this up, as the handle scope needs to go into the callee-save frame.
 *
 * The return of this function denotes:
 * 1) How many bytes of the alloca can be released, if the value is non-negative.
 * 2) An error, if the value is negative.
 */
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtMethod* called = *sp;
  DCHECK(called->IsNative()) << PrettyMethod(called, true);
  uint32_t shorty_len = 0;
  const char* shorty = called->GetShorty(&shorty_len);

  // Run the visitor and update sp.
  BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
  visitor.VisitArguments();
  visitor.FinalizeHandleScope(self);

  // Fix up managed-stack things in Thread.
  self->SetTopOfStack(sp);

  self->VerifyStack();

  // Start JNI, save the cookie.
  uint32_t cookie;
  if (called->IsSynchronized()) {
    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
    if (self->IsExceptionPending()) {
      self->PopHandleScope();
      // A negative value denotes an error.
      return GetTwoWordFailureValue();
    }
  } else {
    cookie = JniMethodStart(self);
  }
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  *(sp32 - 1) = cookie;

  // Retrieve the stored native code.
  void* nativeCode = called->GetEntryPointFromJni();

  // There are two cases for the content of nativeCode:
  // 1) Pointer to the native function.
  // 2) Pointer to the trampoline for native code binding.
  // In the second case, we need to execute the binding and continue with the actual native
  // function pointer.
  DCHECK(nativeCode != nullptr);
  if (nativeCode == GetJniDlsymLookupStub()) {
#if defined(__arm__) || defined(__aarch64__)
    nativeCode = artFindNativeMethod();
#else
    nativeCode = artFindNativeMethod(self);
#endif

    if (nativeCode == nullptr) {
      DCHECK(self->IsExceptionPending());  // There should be an exception pending now.

      // End JNI, as the assembly will move to deliver the exception.
      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
      if (shorty[0] == 'L') {
        artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
      } else {
        artQuickGenericJniEndJNINonRef(self, cookie, lock);
      }

      return GetTwoWordFailureValue();
    }
    // Note that the native code pointer will be automatically set by artFindNativeMethod().
  }

  // Return the native code address (lo) and the bottom-of-alloca address (hi).
  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
                                reinterpret_cast<uintptr_t>(nativeCode));
}

// Defined in quick_jni_entrypoints.cc.
extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
                                    jvalue result, uint64_t result_f, ArtMethod* called,
                                    HandleScope* handle_scope);

/*
 * Called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
 * unlocking.
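 *
 * Both result and result_f are passed in because only the method's shorty, consulted again in
 * GenericJniMethodEnd(), tells us whether the native return value lives in the core or the
 * floating-point return register. The saved cookie is read back from the 32-bit slot just
 * below the method pointer (the *(sp32 - 1) written by the start trampoline above).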
 */
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
                                                    jvalue result,
                                                    uint64_t result_f) {
  // We're just back from a native call here. We do not hold the shared mutator lock at this
  // point; it is only reacquired by GoToRunnable(), later in GenericJniMethodEnd(). Accessing
  // objects or doing anything else that requires the mutator lock before then would cause
  // problems, as the GC may hold the exclusive mutator lock and may be moving objects, etc.
  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
  uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
  ArtMethod* called = *sp;
  uint32_t cookie = *(sp32 - 1);
  HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
  return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
}

// We use TwoWordReturn to optimize scalar returns: the hi value carries the code pointer and
// the lo value the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we assume
// that we hold the mutator lock (see the SHARED_REQUIRES(Locks::mutator_lock_) annotations).

template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
                                     Thread* self, ArtMethod** sp) {
  ScopedQuickEntrypointChecks sqec(self);
  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
  ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
  ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
  if (UNLIKELY(method == nullptr)) {
    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx),
                                                   &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
                                                      self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  DCHECK(!self->IsExceptionPending());
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
                          << " location: " << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

// Explicit artInvokeCommon template function declarations to please the analysis tool.
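// For instance, EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false) below expands to the
// explicit instantiation
//
//   template TwoWordReturn artInvokeCommon<kVirtual, false>(
//       uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp);
//
// (plus the lock annotation), guaranteeing that every <type, access_check> combination is
// instantiated even where a tool cannot see the uses in the trampolines that follow.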
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                          \
  template SHARED_REQUIRES(Locks::mutator_lock_)                                          \
  TwoWordReturn artInvokeCommon<type, access_check>(                                      \
      uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)

EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL

// See the comments in runtime_support_asm.S.
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}

extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}

// Determine the target of an interface dispatch. this_object is known to be non-null.
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx,
                                                      mirror::Object* this_object,
                                                      Thread* self, ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  // The optimizing compiler currently does not inline methods that have an interface
  // invocation. We use the outer method directly to avoid fetching a stack map, which is
  // more expensive.
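  // GetOuterMethod() simply reads the method pointer out of the caller's frame, whereas
  // GetCallingMethod() would also map through any inlining information; the DCHECK below
  // verifies that, absent inlining, the two agree.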
  ArtMethod* caller_method = QuickArgumentVisitor::GetOuterMethod(sp);
  DCHECK_EQ(caller_method, QuickArgumentVisitor::GetCallingMethod(sp));
  ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(dex_method_idx,
                                                                         sizeof(void*));
  DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
  ArtMethod* method;
  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
    method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method,
                                                                    sizeof(void*));
    if (UNLIKELY(method == nullptr)) {
      ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
          interface_method, this_object, caller_method);
      return GetTwoWordFailureValue();  // Failure.
    }
  } else {
    DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
    if (kIsDebugBuild) {
      uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
      const DexFile::CodeItem* code = caller_method->GetCodeItem();
      CHECK_LT(dex_pc, code->insns_size_in_code_units_);
      const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
      Instruction::Code instr_code = instr->Opcode();
      CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
            instr_code == Instruction::INVOKE_INTERFACE_RANGE)
          << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
      if (instr_code == Instruction::INVOKE_INTERFACE) {
        CHECK_EQ(dex_method_idx, instr->VRegB_35c());
      } else {
        CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
        CHECK_EQ(dex_method_idx, instr->VRegB_3rc());
      }
    }

    const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    uint32_t shorty_len;
    const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
                                                   &shorty_len);
    {
      // Remember the args in case a GC happens in FindMethodFromCode.
      ScopedObjectAccessUnchecked soa(self->GetJniEnv());
      RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
      visitor.VisitArguments();
      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
                                                     self);
      visitor.FixupReferences();
    }

    if (UNLIKELY(method == nullptr)) {
      CHECK(self->IsExceptionPending());
      return GetTwoWordFailureValue();  // Failure.
    }
  }
  const void* code = method->GetEntryPointFromQuickCompiledCode();

  // When we return, the caller will branch to this address, so it had better not be 0!
  DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
                          << " location: " << method->GetDexFile()->GetLocation();

  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                reinterpret_cast<uintptr_t>(method));
}

}  // namespace art