quick_trampoline_entrypoints.cc revision bdf7f1c3ab65ccb70f62db5ab31dba060632d458
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "art_method-inl.h"
#include "base/enums.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "linear_alloc.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "stack.h"

namespace art {

// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
  // Number of bytes for each out register in the caller method's frame.
  static constexpr size_t kBytesStackArgLocation = 4;
  // Frame size in bytes of a callee-save frame for RefsAndArgs.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
      GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
#if defined(__arm__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | ...        |    4x6 bytes callee saves
  // | R3         |
  // | R2         |
  // | R1         |
  // | S15        |
  // | :          |
  // | S0         |
  // |            |    4x2 bytes padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;
  static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm::ArmCalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm::ArmCalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm::ArmCalleeSaveLrOffset(Runtime::kSaveRefsAndArgs);  // Offset of return address.
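  // The GPR args (R1-R3) are spilled contiguously from Gpr1Offset, so a linear mapping works:
  // with the 4-byte GPR spill slots used here, gpr_index 0 -> +0 (R1), 1 -> +4 (R2), 2 -> +8 (R3).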
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__aarch64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | LR         |
  // | X29        |
  // | :          |
  // | X20        |
  // | X7         |
  // | :          |
  // | X1         |
  // | D7         |
  // | :          |
  // | D0         |
  // |            |    padding
  // | Method*    |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
      arm64::Arm64CalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
      arm64::Arm64CalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
      arm64::Arm64CalleeSaveLrOffset(Runtime::kSaveRefsAndArgs);  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && !defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | F15        |
  // | F14        |    f_arg1
  // | F13        |
  // | F12        |    f_arg0
  // |            |    padding
  // | A0/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = true;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = true;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 FPR slots for at most 2 FP arguments: floats
                                                 // can be passed only in even-numbered registers
                                                 // and each double occupies two registers.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 32;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 76;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__mips__) && defined(__LP64__)
  // The callee save frame is pointed to by SP.
  // | argN       |  |
  // | ...        |  |
  // | arg4       |  |
  // | arg3 spill |  |  Caller's frame
  // | arg2 spill |  |
  // | arg1 spill |  |
  // | Method*    | ---
  // | RA         |
  // | ...        |    callee saves
  // | A7         |    arg7
  // | A6         |    arg6
  // | A5         |    arg5
  // | A4         |    arg4
  // | A3         |    arg3
  // | A2         |    arg2
  // | A1         |    arg1
  // | F19        |    f_arg7
  // | F18        |    f_arg6
  // | F17        |    f_arg5
  // | F16        |    f_arg4
  // | F15        |    f_arg3
  // | F14        |    f_arg2
  // | F13        |    f_arg1
  // | F12        |    f_arg0
  // |            |    padding
  // | A0/Method* |  <- sp
  // NOTE: for MIPS64, when A0 is skipped, F0 is also skipped.
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = true;

  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__i386__)
  // The callee save frame is pointed to by SP.
  // | argN        |  |
  // | ...         |  |
  // | arg4        |  |
  // | arg3 spill  |  |  Caller's frame
  // | arg2 spill  |  |
  // | arg1 spill  |  |
  // | Method*     | ---
  // | Return      |
  // | EBP,ESI,EDI |    callee saves
  // | EBX         |    arg3
  // | EDX         |    arg2
  // | ECX         |    arg1
  // | XMM3        |    float arg 4
  // | XMM2        |    float arg 3
  // | XMM1        |    float arg 2
  // | XMM0        |    float arg 1
  // | EAX/Method* |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
  }
#elif defined(__x86_64__)
  // The callee save frame is pointed to by SP.
  // | argN            |  |
  // | ...             |  |
  // | reg. arg spills |  |  Caller's frame
  // | Method*         | ---
  // | Return          |
  // | R15             |    callee save
  // | R14             |    callee save
  // | R13             |    callee save
  // | R12             |    callee save
  // | R9              |    arg5
  // | R8              |    arg4
  // | RSI/R6          |    arg1
  // | RBP/R5          |    callee save
  // | RBX/R3          |    callee save
  // | RDX/R2          |    arg2
  // | RCX/R1          |    arg3
  // | XMM7            |    float arg 8
  // | XMM6            |    float arg 7
  // | XMM5            |    float arg 6
  // | XMM4            |    float arg 5
  // | XMM3            |    float arg 4
  // | XMM2            |    float arg 3
  // | XMM1            |    float arg 2
  // | XMM0            |    float arg 1
  // | Padding         |
  // | RDI/Method*     |  <- sp
  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
  static constexpr bool kAlignPairRegister = false;
  static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
  static constexpr bool kQuickSkipOddFpRegisters = false;
  static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
  static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
  static constexpr bool kGprFprLockstep = false;
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    switch (gpr_index) {
      case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
      case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
      default:
        LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
        return 0;
    }
  }
#else
#error "Unsupported architecture"
#endif

 public:
  // Special handling for proxy methods. Proxy methods are instance methods so the
  // 'this' object is the 1st argument. They also have the same frame layout as the
  // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
  // 1st GPR.
  static mirror::Object* GetProxyThisObject(ArtMethod** sp)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK((*sp)->IsProxyMethod());
    CHECK_GT(kNumQuickGprArgs, 0u);
    constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
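    // The StackReference holding 'this' lives at the first GPR spill slot within this frame.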
    size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
        GprIndexToGprOffset(kThisGprIndex);
    uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
  }

  static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
  }

  static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* previous_sp =
        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    return *reinterpret_cast<ArtMethod**>(previous_sp);
  }

  static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
    ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
    uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);

    if (current_code->IsOptimized()) {
      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
      CodeInfoEncoding encoding = code_info.ExtractEncoding();
      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
      DCHECK(stack_map.IsValid());
      if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
        return inline_info.GetDexPcAtDepth(encoding.inline_info_encoding,
                                           inline_info.GetDepth(encoding.inline_info_encoding) - 1);
      } else {
        return stack_map.GetDexPc(encoding.stack_map_encoding);
      }
    } else {
      return current_code->ToDexPc(*caller_sp, outer_pc);
    }
  }

  // For the given quick ref-and-args frame, return the caller's PC.
  static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK((*sp)->IsCalleeSaveMethod());
    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    return *reinterpret_cast<uintptr_t*>(lr);
  }

  QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                       uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
      is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
      gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
      fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
      stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
                  + sizeof(ArtMethod*)),  // Skip ArtMethod*.
      gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
      cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
                  "Number of Quick FPR arguments unexpected");
    static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
                  "Double alignment unexpected");
    // For register alignment, we want to assume that counters (fpr_double_index_) are even if
    // the next register is even.
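    // Back-filling floats into skipped slots relies on fpr_double_index_ staying even, which in
    // turn requires an even number of FPR argument registers; the assert below checks that.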
    static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
                  "Number of Quick FPR arguments not even");
    DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  }

  virtual ~QuickArgumentVisitor() {}

  virtual void Visit() = 0;

  Primitive::Type GetParamPrimitiveType() const {
    return cur_type_;
  }

  uint8_t* GetParamAddress() const {
    if (!kQuickSoftFloatAbi) {
      Primitive::Type type = GetParamPrimitiveType();
      if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
        if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
          if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
            return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
          }
        } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
          return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
        }
        return stack_args_ + (stack_index_ * kBytesStackArgLocation);
      }
    }
    if (gpr_index_ < kNumQuickGprArgs) {
      return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    }
    return stack_args_ + (stack_index_ * kBytesStackArgLocation);
  }

  bool IsSplitLongOrDouble() const {
    if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
        (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
      return is_split_long_or_double_;
    } else {
      return false;  // An optimization for when GPR and FPRs are 64bit.
    }
  }

  bool IsParamAReference() const {
    return GetParamPrimitiveType() == Primitive::kPrimNot;
  }

  bool IsParamALongOrDouble() const {
    Primitive::Type type = GetParamPrimitiveType();
    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
  }

  uint64_t ReadSplitLongParam() const {
    // The split long is always available through the stack.
    return *reinterpret_cast<uint64_t*>(stack_args_
        + stack_index_ * kBytesStackArgLocation);
  }

  void IncGprIndex() {
    gpr_index_++;
    if (kGprFprLockstep) {
      fpr_index_++;
    }
  }

  void IncFprIndex() {
    fpr_index_++;
    if (kGprFprLockstep) {
      gpr_index_++;
    }
  }

  void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
    // (a) 'stack_args_' should point to the first method argument;
    // (b) whatever the argument type, 'stack_index_' must advance with every visit.
    gpr_index_ = 0;
    fpr_index_ = 0;
    if (kQuickDoubleRegAlignedFloatBackFilled) {
      fpr_double_index_ = 0;
    }
    stack_index_ = 0;
    if (!is_static_) {  // Handle this.
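      // The receiver is visited as a reference: it always consumes a stack slot and, when one
      // is available, the first GPR.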
      cur_type_ = Primitive::kPrimNot;
      is_split_long_or_double_ = false;
      Visit();
      stack_index_++;
      if (kNumQuickGprArgs > 0) {
        IncGprIndex();
      }
    }
    for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
      cur_type_ = Primitive::GetType(shorty_[shorty_index]);
      switch (cur_type_) {
        case Primitive::kPrimNot:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimByte:
        case Primitive::kPrimChar:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (gpr_index_ < kNumQuickGprArgs) {
            IncGprIndex();
          }
          break;
        case Primitive::kPrimFloat:
          is_split_long_or_double_ = false;
          Visit();
          stack_index_++;
          if (kQuickSoftFloatAbi) {
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
            }
          } else {
            if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (kQuickDoubleRegAlignedFloatBackFilled) {
                // Double should not overlap with float.
                // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
                fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
                // Float should not overlap with double.
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              } else if (kQuickSkipOddFpRegisters) {
                IncFprIndex();
              }
            }
          }
          break;
        case Primitive::kPrimDouble:
        case Primitive::kPrimLong:
          if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
            if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
              // Currently, this is only for ARM and MIPS, where the first available parameter
              // register is R1 (on ARM) or A1 (on MIPS). So we skip it, and use R2 (on ARM) or
              // A2 (on MIPS) instead.
              IncGprIndex();
            }
            is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
                ((gpr_index_ + 1) == kNumQuickGprArgs);
            if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
              // We don't want to split this. Pass over this register.
              gpr_index_++;
              is_split_long_or_double_ = false;
            }
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (gpr_index_ < kNumQuickGprArgs) {
              IncGprIndex();
              if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
                if (gpr_index_ < kNumQuickGprArgs) {
                  IncGprIndex();
                }
              }
            }
          } else {
            is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
                ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
            Visit();
            if (kBytesStackArgLocation == 4) {
              stack_index_ += 2;
            } else {
              CHECK_EQ(kBytesStackArgLocation, 8U);
              stack_index_++;
            }
            if (kQuickDoubleRegAlignedFloatBackFilled) {
              if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
                fpr_double_index_ += 2;
                // Float should not overlap with double.
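                // An even fpr_index_ means no single-precision hole is pending for back-fill, so
                // advance it past the doubles just allocated.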
                if (fpr_index_ % 2 == 0) {
                  fpr_index_ = std::max(fpr_double_index_, fpr_index_);
                }
              }
            } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
              IncFprIndex();
              if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
                if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
                  IncFprIndex();
                }
              }
            }
          }
          break;
        default:
          LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
      }
    }
  }

 protected:
  const bool is_static_;
  const char* const shorty_;
  const uint32_t shorty_len_;

 private:
  uint8_t* const gpr_args_;    // Address of GPR arguments in callee save frame.
  uint8_t* const fpr_args_;    // Address of FPR arguments in callee save frame.
  uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
  uint32_t gpr_index_;         // Index into spilled GPRs.
  // Index into spilled FPRs.
  // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
  // holds a higher register number.
  uint32_t fpr_index_;
  // Index into spilled FPRs for aligned double.
  // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed
  // in terms of singles; may be behind fpr_index_.
  uint32_t fpr_double_index_;
  uint32_t stack_index_;       // Index into arguments on the stack.
  // The current type of argument during VisitArguments.
  Primitive::Type cur_type_;
  // Does a 64bit parameter straddle the register and stack arguments?
  bool is_split_long_or_double_;
};

// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows using the QuickArgumentVisitor constants without moving all the code into its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return QuickArgumentVisitor::GetProxyThisObject(sp);
}

// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

 private:
  ShadowFrame* const sf_;
  uint32_t cur_reg_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};

void BuildQuickShadowFrameVisitor::Visit() {
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
      } else {
        sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
      }
      ++cur_reg_;
      break;
    case Primitive::kPrimNot: {
        StackReference<mirror::Object>* stack_ref =
            reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
        sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  ++cur_reg_;
}

extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
  // frame.
  ScopedQuickEntrypointChecks sqec(self);

  if (UNLIKELY(!method->IsInvokable())) {
    method->ThrowInvocationTimeError();
    return 0;
  }

  JValue tmp_value;
  ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
      StackedShadowFrameType::kDeoptimizationShadowFrame, false);
  ManagedStack fragment;

  DCHECK(!method->IsNative()) << PrettyMethod(method);
  uint32_t shorty_len = 0;
  ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();
  DCHECK(code_item != nullptr) << PrettyMethod(method);
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);

  JValue result;

  if (deopt_frame != nullptr) {
    // Coming from partial-fragment deopt.

    if (kIsDebugBuild) {
      // Sanity-check: are the methods as expected? We check that the last shadow frame
      // (the bottom of the call-stack) corresponds to the called method.
      ShadowFrame* linked = deopt_frame;
      while (linked->GetLink() != nullptr) {
        linked = linked->GetLink();
      }
      CHECK_EQ(method, linked->GetMethod()) << PrettyMethod(method) << " "
          << PrettyMethod(linked->GetMethod());
    }

    if (VLOG_IS_ON(deopt)) {
      // Print out the stack to verify that it was a partial-fragment deopt.
      LOG(INFO) << "Continuing from deopt. Stack is:";
      QuickExceptionHandler::DumpFramesWithType(self, true);
    }

    mirror::Throwable* pending_exception = nullptr;
    bool from_code = false;
    self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);

    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);

    // Ensure that the stack is still in order.
    if (kIsDebugBuild) {
      class DummyStackVisitor : public StackVisitor {
       public:
        explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
            : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

        bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
          // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the
          // walking logic. Just always say we want to continue.
          return true;
        }
      };
      DummyStackVisitor dsv(self);
      dsv.WalkStack();
    }

    // Restore the exception that was pending before deoptimization then interpret the
    // deoptimized frames.
    if (pending_exception != nullptr) {
      self->SetException(pending_exception);
    }
    interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result);
  } else {
    const char* old_cause = self->StartAssertNoThreadSuspension(
        "Building interpreter shadow frame");
    uint16_t num_regs = code_item->registers_size_;
    // No last shadow coming from quick.
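    // CREATE_SHADOW_FRAME is a macro so the frame can be alloca'd in this scope; the null link
    // marks it as the bottom interpreter frame of this fragment.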
    ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
        CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
    ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                      shadow_frame, first_arg_reg);
    shadow_frame_builder.VisitArguments();
    const bool needs_initialization =
        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
    // Push a transition back into managed code onto the linked list in thread.
    self->PushManagedStackFragment(&fragment);
    self->PushShadowFrame(shadow_frame);
    self->EndAssertNoThreadSuspension(old_cause);

    if (needs_initialization) {
      // Ensure static method's class is initialized.
      StackHandleScope<1> hs(self);
      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
        DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(shadow_frame->GetMethod());
        self->PopManagedStackFragment(fragment);
        return 0;
      }
    }

    result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
  }

  // Pop transition.
  self->PopManagedStackFragment(fragment);

  // Request a stack deoptimization if needed.
  ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
  uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
  // If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
  // should be done and it knows the real return pc.
  if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
               Dbg::IsForcedInterpreterNeededForUpcall(self, caller) &&
               Runtime::Current()->IsDeoptimizeable(caller_pc))) {
    // Push the context of the deoptimization stack so we can restore the return value and the
    // exception before executing the deoptimized frames.
    self->PushDeoptimizationContext(
        result, shorty[0] == 'L', /* from_code */ false, self->GetException());

    // Set special exception to cause deoptimization.
    self->SetException(Thread::GetDeoptimizationException());
  }

  // No need to restore the args since the method has already been run by the interpreter.
  return result.GetJ();
}

// Visits arguments on the stack placing them into the args vector. Object* arguments are
// converted to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
                            ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  std::vector<jvalue>* const args_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};

void BuildQuickArgumentVisitor::Visit() {
  jvalue val;
  Primitive::Type type = GetParamPrimitiveType();
  switch (type) {
    case Primitive::kPrimNot: {
      StackReference<mirror::Object>* stack_ref =
          reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
      val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
      references_.push_back(std::make_pair(val.l, stack_ref));
      break;
    }
    case Primitive::kPrimLong:  // Fall-through.
    case Primitive::kPrimDouble:
      if (IsSplitLongOrDouble()) {
        val.j = ReadSplitLongParam();
      } else {
        val.j = *reinterpret_cast<jlong*>(GetParamAddress());
      }
      break;
    case Primitive::kPrimBoolean:  // Fall-through.
    case Primitive::kPrimByte:     // Fall-through.
    case Primitive::kPrimChar:     // Fall-through.
    case Primitive::kPrimShort:    // Fall-through.
    case Primitive::kPrimInt:      // Fall-through.
    case Primitive::kPrimFloat:
      val.i = *reinterpret_cast<jint*>(GetParamAddress());
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  args_->push_back(val);
}

void BuildQuickArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object
// method which is responsible for recording callee save registers. We explicitly place into
// jobjects the incoming reference arguments (so they survive GC). We invoke the invocation
// handler, which is a field within the proxy object, which will box the primitive arguments and
// deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
    ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
  const char* old_cause =
      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
  // Register the top of the managed stack, making stack crawlable.
  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
  self->VerifyStack();
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  // Create local ref. copies of proxy method and the receiver.
  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);

  // Place arguments into the args vector and remove the receiver.
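  // The receiver already has its own local reference (rcvr_jobj above) and is handed to the
  // invocation handler separately, so it is erased from args after visiting.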
  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
      << PrettyMethod(non_proxy_method);
  std::vector<jvalue> args;
  uint32_t shorty_len = 0;
  const char* shorty = non_proxy_method->GetShorty(&shorty_len);
  BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);

  local_ref_visitor.VisitArguments();
  DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
  args.erase(args.begin());

  // Convert proxy method into expected interface method.
  ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
  DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
  self->EndAssertNoThreadSuspension(old_cause);
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  jobject interface_method_jobj = soa.AddLocalReference<jobject>(
      mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(),
                                                                      interface_method));

  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
  // that performs allocations.
  JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
  // Restore references which might have moved.
  local_ref_visitor.FixupReferences();
  return result.GetJ();
}

// Read object references held in arguments from quick frames and place them in JNI local
// references, so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
 public:
  RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
                               uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
      QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}

  void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;

  void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  ScopedObjectAccessUnchecked* const soa_;
  // References which we must update when exiting in case the GC moved the objects.
  std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;

  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};

void RememberForGcArgumentVisitor::Visit() {
  if (IsParamAReference()) {
    StackReference<mirror::Object>* stack_ref =
        reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    jobject reference =
        soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    references_.push_back(std::make_pair(reference, stack_ref));
  }
}

void RememberForGcArgumentVisitor::FixupReferences() {
  // Fixup any references which may have changed.
  for (const auto& pair : references_) {
    pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    soa_->Env()->DeleteLocalRef(pair.first);
  }
}

// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
    ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // The resolution trampoline stashes the resolved method into the callee-save frame to transport it.
  // Thus, when exiting, the stack cannot be verified (as the resolved method most likely does
  // not have the same stack layout as the callee-save method).
  ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
  // Start new JNI local reference state.
  JNIEnvExt* env = self->GetJniEnv();
  ScopedObjectAccessUnchecked soa(env);
  ScopedJniEnvLocalRefState env_state(env);
  const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");

  // Compute details about the called method (avoid GCs).
  ClassLinker* linker = Runtime::Current()->GetClassLinker();
  InvokeType invoke_type;
  MethodReference called_method(nullptr, 0);
  const bool called_method_known_on_entry = !called->IsRuntimeMethod();
  ArtMethod* caller = nullptr;
  if (!called_method_known_on_entry) {
    caller = QuickArgumentVisitor::GetCallingMethod(sp);
    uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
    const DexFile::CodeItem* code;
    called_method.dex_file = caller->GetDexFile();
    code = caller->GetCodeItem();
    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    Instruction::Code instr_code = instr->Opcode();
    bool is_range;
    switch (instr_code) {
      case Instruction::INVOKE_DIRECT:
        invoke_type = kDirect;
        is_range = false;
        break;
      case Instruction::INVOKE_DIRECT_RANGE:
        invoke_type = kDirect;
        is_range = true;
        break;
      case Instruction::INVOKE_STATIC:
        invoke_type = kStatic;
        is_range = false;
        break;
      case Instruction::INVOKE_STATIC_RANGE:
        invoke_type = kStatic;
        is_range = true;
        break;
      case Instruction::INVOKE_SUPER:
        invoke_type = kSuper;
        is_range = false;
        break;
      case Instruction::INVOKE_SUPER_RANGE:
        invoke_type = kSuper;
        is_range = true;
        break;
      case Instruction::INVOKE_VIRTUAL:
        invoke_type = kVirtual;
        is_range = false;
        break;
      case Instruction::INVOKE_VIRTUAL_RANGE:
        invoke_type = kVirtual;
        is_range = true;
        break;
      case Instruction::INVOKE_INTERFACE:
        invoke_type = kInterface;
        is_range = false;
        break;
      case Instruction::INVOKE_INTERFACE_RANGE:
        invoke_type = kInterface;
        is_range = true;
        break;
      default:
        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
        UNREACHABLE();
    }
    called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
  } else {
    invoke_type = kStatic;
    called_method.dex_file = called->GetDexFile();
    called_method.dex_method_index = called->GetDexMethodIndex();
  }
  uint32_t shorty_len;
  const char* shorty =
      called_method.dex_file->GetMethodShorty(
          called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
  visitor.VisitArguments();
  self->EndAssertNoThreadSuspension(old_cause);
  const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
  // Resolve method filling in dex cache.
  if (!called_method_known_on_entry) {
    StackHandleScope<1> hs(self);
    mirror::Object* dummy = nullptr;
    HandleWrapper<mirror::Object> h_receiver(
        hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
    called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
        self, called_method.dex_method_index, caller, invoke_type);
  }
  const void* code = nullptr;
  if (LIKELY(!self->IsExceptionPending())) {
    // Incompatible class change should have been handled in resolve method.
    CHECK(!called->CheckIncompatibleClassChange(invoke_type))
        << PrettyMethod(called) << " " << invoke_type;
    if (virtual_or_interface || invoke_type == kSuper) {
      // Refine called method based on receiver for kVirtual/kInterface, and
      // caller for kSuper.
      ArtMethod* orig_called = called;
      if (invoke_type == kVirtual) {
        CHECK(receiver != nullptr) << invoke_type;
        called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
      } else if (invoke_type == kInterface) {
        CHECK(receiver != nullptr) << invoke_type;
        called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
      } else {
        DCHECK_EQ(invoke_type, kSuper);
        CHECK(caller != nullptr) << invoke_type;
        StackHandleScope<2> hs(self);
        Handle<mirror::DexCache> dex_cache(
            hs.NewHandle(caller->GetDeclaringClass()->GetDexCache()));
        Handle<mirror::ClassLoader> class_loader(
            hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader()));
        // TODO: Maybe put this into a mirror::Class function.
        mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
            called_method.dex_method_index, dex_cache, class_loader);
        if (ref_class->IsInterface()) {
          called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
        } else {
          called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
              called->GetMethodIndex(), kRuntimePointerSize);
        }
      }

      CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
          << PrettyTypeOf(receiver) << " "
          << invoke_type << " " << orig_called->GetVtableIndex();

      // We came here because of sharpening. Ensure the dex cache is up-to-date on the method
      // index of the sharpened method, avoiding dirtying the dex cache if possible.
      // Note: called_method.dex_method_index references the dex method before the
      // FindVirtualMethodFor... call. This is ok for FindDexMethodIndexInOtherDexFile, which
      // only cares about the name and signature.
      uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
      if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) {
        // Calling from one dex file to another, need to compute the method index appropriate to
        // the caller's dex file. Since we get here only if the original called was a runtime
        // method, we've got the correct dex_file and a dex_method_idx from above.
        DCHECK(!called_method_known_on_entry);
        DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
        const DexFile* caller_dex_file = called_method.dex_file;
        uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
        update_dex_cache_method_index =
            called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
                                                     caller_method_name_and_sig_index);
      }
      if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
          (caller->GetDexCacheResolvedMethod(
              update_dex_cache_method_index, kRuntimePointerSize) != called)) {
        caller->SetDexCacheResolvedMethod(update_dex_cache_method_index,
                                          called,
                                          kRuntimePointerSize);
      }
    } else if (invoke_type == kStatic) {
      const auto called_dex_method_idx = called->GetDexMethodIndex();
      // For static invokes, we may dispatch to the static method in the superclass but resolve
      // using the subclass. To prevent getting slow paths on each invoke, we force set the
      // resolved method for the super class dex method index if we are in the same dex file.
      // b/19175856
      if (called->GetDexFile() == called_method.dex_file &&
          called_method.dex_method_index != called_dex_method_idx) {
        called->GetDexCache()->SetResolvedMethod(called_dex_method_idx,
                                                 called,
                                                 kRuntimePointerSize);
      }
    }

    // Ensure that the called method's class is initialized.
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    linker->EnsureInitialized(soa.Self(), called_class, true, true);
    if (LIKELY(called_class->IsInitialized())) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
        // If the caller is deoptimized (by a breakpoint, for example), we have to
        // continue its execution with interpreter when returning from the called
        // method. Because we do not want to execute the called method with the
        // interpreter, we wrap its execution into the instrumentation stubs.
        // When the called method returns, it will execute the instrumentation
        // exit hook that will determine the need of the interpreter with a call
        // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
        // it is needed.
        code = GetQuickInstrumentationEntryPoint();
      } else {
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else if (called_class->IsInitializing()) {
      if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
        // If we are single-stepping or the called method is deoptimized (by a
        // breakpoint, for example), then we have to execute the called method
        // with the interpreter.
        code = GetQuickToInterpreterBridge();
      } else if (invoke_type == kStatic) {
        // Class is still initializing, go to oat and grab code (trampoline must be left in
        // place until class is initialized to stop races between threads).
        code = linker->GetQuickOatCodeFor(called);
      } else {
        // No trampoline for non-static methods.
        code = called->GetEntryPointFromQuickCompiledCode();
      }
    } else {
      DCHECK(called_class->IsErroneous());
    }
  }
  CHECK_EQ(code == nullptr, self->IsExceptionPending());
  // Fixup any locally saved objects that may have moved during a GC.
  visitor.FixupReferences();
  // Place called method in callee-save frame to be placed as first argument to quick method.
  *sp = called;

  return code;
}

/*
 * This class uses a couple of observations to unite the different calling conventions through
 * a few constants.
 *
 * 1) Number of registers used for passing is normally even, so counting down has no penalty for
 *    possible alignment.
 * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
 *    types, so using uintptr_t is OK. It also means that we can use kRegistersNeededX to denote
 *    when we have to split things.
 * 3) The only soft-float ABI, ARM, is 32b, so no widening needs to be taken into account for
 *    floats and we can use Int handling directly.
 * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
 *    is necessary when widening. Also, widening of Ints will take place implicitly, and the
 *    extension should be compatible with Aarch64, which mandates copying the available bits
 *    into LSB and leaving the rest unspecified.
 * 5) Aligning longs and doubles is necessary on ARM only, and it's the same in registers and on
 *    the stack.
 * 6) There is only little endian.
 *
 *
 * Actual work is supposed to be done in a delegate of the template type. The interface is as
 * follows:
 *
 * void PushGpr(uintptr_t): Add a value for the next GPR.
 *
 * void PushFpr4(float): Add a value for the next FPR of size 32b. It is only called when padding
 *                       is needed, that is, when the architecture is 32b and aligns 64b values.
 *
 * void PushFpr8(uint64_t): Push a double. We _will_ call this on 32b; it is the callee's job to
 *                          split it if necessary. The current state will have aligned, if
 *                          necessary.
 *
 * void PushStack(uintptr_t): Push a value to the stack.
 *
 * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
 *                                            called with nullptr, as this might be important for
 *                                            null initialization. Must return the jobject, that
 *                                            is, the reference to the entry in the HandleScope
 *                                            (nullptr if necessary).
 *
 */
template<class T> class BuildNativeCallFrameStateMachine {
 public:
#if defined(__arm__)
  // TODO: These are all dummy values!
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs (X0-X7).
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs (V0-V7).

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNativeSoftFloatAbi = true;  // FP args are handled like integer args
                                                     // here; see kMultiFPRegistersWidened below.
  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = true;
  static constexpr bool kMultiFPRegistersWidened = true;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = true;
  static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__mips__) && defined(__LP64__)
  // Let the code prepare GPRs only and we will load the FPRs with the same data.
  static constexpr bool kNativeSoftFloatAbi = true;
  static constexpr size_t kNumNativeGprArgs = 8;
  static constexpr size_t kNumNativeFprArgs = 0;

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = true;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
  // TODO: Check these!
  static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp.
  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs; all args go on
                                                  // the stack.
  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.

  static constexpr size_t kRegistersNeededForLong = 2;
  static constexpr size_t kRegistersNeededForDouble = 2;
  static constexpr bool kMultiRegistersAligned = false;  // x86 is not using regs anyway.
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
  static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
  static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
  static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
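  // (SysV AMD64: integer args in RDI, RSI, RDX, RCX, R8, R9; FP args in XMM0-XMM7.)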

  static constexpr size_t kRegistersNeededForLong = 1;
  static constexpr size_t kRegistersNeededForDouble = 1;
  static constexpr bool kMultiRegistersAligned = false;
  static constexpr bool kMultiFPRegistersWidened = false;
  static constexpr bool kMultiGPRegistersWidened = false;
  static constexpr bool kAlignLongOnStack = false;
  static constexpr bool kAlignDoubleOnStack = false;
#else
#error "Unsupported architecture"
#endif

 public:
  explicit BuildNativeCallFrameStateMachine(T* delegate)
      : gpr_index_(kNumNativeGprArgs),
        fpr_index_(kNumNativeFprArgs),
        stack_entries_(0),
        delegate_(delegate) {
    // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even
    // iff the next register is even; counting down is just to make the compiler happy...
    static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
    static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
  }

  virtual ~BuildNativeCallFrameStateMachine() {}

  bool HavePointerGpr() const {
    return gpr_index_ > 0;
  }

  void AdvancePointer(const void* val) {
    if (HavePointerGpr()) {
      gpr_index_--;
      PushGpr(reinterpret_cast<uintptr_t>(val));
    } else {
      stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b.
      PushStack(reinterpret_cast<uintptr_t>(val));
      gpr_index_ = 0;
    }
  }

  bool HaveHandleScopeGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
    uintptr_t handle = PushHandle(ptr);
    if (HaveHandleScopeGpr()) {
      gpr_index_--;
      PushGpr(handle);
    } else {
      stack_entries_++;
      PushStack(handle);
      gpr_index_ = 0;
    }
  }

  bool HaveIntGpr() const {
    return gpr_index_ > 0;
  }

  void AdvanceInt(uint32_t val) {
    if (HaveIntGpr()) {
      gpr_index_--;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushGpr(val);
      }
    } else {
      stack_entries_++;
      if (kMultiGPRegistersWidened) {
        DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
        PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
      } else {
        PushStack(val);
      }
      gpr_index_ = 0;
    }
  }

  bool HaveLongGpr() const {
    return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
1 : 0); 1351 } 1352 1353 bool LongGprNeedsPadding() const { 1354 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1355 kAlignLongOnStack && // and when it needs alignment 1356 (gpr_index_ & 1) == 1; // counter is odd, see constructor 1357 } 1358 1359 bool LongStackNeedsPadding() const { 1360 return kRegistersNeededForLong > 1 && // only pad when using multiple registers 1361 kAlignLongOnStack && // and when it needs 8B alignment 1362 (stack_entries_ & 1) == 1; // counter is odd 1363 } 1364 1365 void AdvanceLong(uint64_t val) { 1366 if (HaveLongGpr()) { 1367 if (LongGprNeedsPadding()) { 1368 PushGpr(0); 1369 gpr_index_--; 1370 } 1371 if (kRegistersNeededForLong == 1) { 1372 PushGpr(static_cast<uintptr_t>(val)); 1373 } else { 1374 PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1375 PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1376 } 1377 gpr_index_ -= kRegistersNeededForLong; 1378 } else { 1379 if (LongStackNeedsPadding()) { 1380 PushStack(0); 1381 stack_entries_++; 1382 } 1383 if (kRegistersNeededForLong == 1) { 1384 PushStack(static_cast<uintptr_t>(val)); 1385 stack_entries_++; 1386 } else { 1387 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1388 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1389 stack_entries_ += 2; 1390 } 1391 gpr_index_ = 0; 1392 } 1393 } 1394 1395 bool HaveFloatFpr() const { 1396 return fpr_index_ > 0; 1397 } 1398 1399 void AdvanceFloat(float val) { 1400 if (kNativeSoftFloatAbi) { 1401 AdvanceInt(bit_cast<uint32_t, float>(val)); 1402 } else { 1403 if (HaveFloatFpr()) { 1404 fpr_index_--; 1405 if (kRegistersNeededForDouble == 1) { 1406 if (kMultiFPRegistersWidened) { 1407 PushFpr8(bit_cast<uint64_t, double>(val)); 1408 } else { 1409 // No widening, just use the bits. 1410 PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val))); 1411 } 1412 } else { 1413 PushFpr4(val); 1414 } 1415 } else { 1416 stack_entries_++; 1417 if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) { 1418 // Need to widen before storing: Note the "double" in the template instantiation. 1419 // Note: We need to jump through those hoops to make the compiler happy. 1420 DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t)); 1421 PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val))); 1422 } else { 1423 PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val))); 1424 } 1425 fpr_index_ = 0; 1426 } 1427 } 1428 } 1429 1430 bool HaveDoubleFpr() const { 1431 return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 
1 : 0); 1432 } 1433 1434 bool DoubleFprNeedsPadding() const { 1435 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1436 kAlignDoubleOnStack && // and when it needs alignment 1437 (fpr_index_ & 1) == 1; // counter is odd, see constructor 1438 } 1439 1440 bool DoubleStackNeedsPadding() const { 1441 return kRegistersNeededForDouble > 1 && // only pad when using multiple registers 1442 kAlignDoubleOnStack && // and when it needs 8B alignment 1443 (stack_entries_ & 1) == 1; // counter is odd 1444 } 1445 1446 void AdvanceDouble(uint64_t val) { 1447 if (kNativeSoftFloatAbi) { 1448 AdvanceLong(val); 1449 } else { 1450 if (HaveDoubleFpr()) { 1451 if (DoubleFprNeedsPadding()) { 1452 PushFpr4(0); 1453 fpr_index_--; 1454 } 1455 PushFpr8(val); 1456 fpr_index_ -= kRegistersNeededForDouble; 1457 } else { 1458 if (DoubleStackNeedsPadding()) { 1459 PushStack(0); 1460 stack_entries_++; 1461 } 1462 if (kRegistersNeededForDouble == 1) { 1463 PushStack(static_cast<uintptr_t>(val)); 1464 stack_entries_++; 1465 } else { 1466 PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF)); 1467 PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF)); 1468 stack_entries_ += 2; 1469 } 1470 fpr_index_ = 0; 1471 } 1472 } 1473 } 1474 1475 uint32_t GetStackEntries() const { 1476 return stack_entries_; 1477 } 1478 1479 uint32_t GetNumberOfUsedGprs() const { 1480 return kNumNativeGprArgs - gpr_index_; 1481 } 1482 1483 uint32_t GetNumberOfUsedFprs() const { 1484 return kNumNativeFprArgs - fpr_index_; 1485 } 1486 1487 private: 1488 void PushGpr(uintptr_t val) { 1489 delegate_->PushGpr(val); 1490 } 1491 void PushFpr4(float val) { 1492 delegate_->PushFpr4(val); 1493 } 1494 void PushFpr8(uint64_t val) { 1495 delegate_->PushFpr8(val); 1496 } 1497 void PushStack(uintptr_t val) { 1498 delegate_->PushStack(val); 1499 } 1500 uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { 1501 return delegate_->PushHandle(ref); 1502 } 1503 1504 uint32_t gpr_index_; // Number of free GPRs 1505 uint32_t fpr_index_; // Number of free FPRs 1506 uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not 1507 // extended 1508 T* const delegate_; // What Push implementation gets called 1509}; 1510 1511// Computes the sizes of register stacks and call stack area. Handling of references can be extended 1512// in subclasses. 1513// 1514// To handle native pointers, use "L" in the shorty for an object reference, which simulates 1515// them with handles. 1516class ComputeNativeCallFrameSize { 1517 public: 1518 ComputeNativeCallFrameSize() : num_stack_entries_(0) {} 1519 1520 virtual ~ComputeNativeCallFrameSize() {} 1521 1522 uint32_t GetStackSize() const { 1523 return num_stack_entries_ * sizeof(uintptr_t); 1524 } 1525 1526 uint8_t* LayoutCallStack(uint8_t* sp8) const { 1527 sp8 -= GetStackSize(); 1528 // Align by kStackAlignment. 
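 // (RoundDown may only move sp8 further down: aligning a downward-growing stack must never move
 // the pointer up, or the freshly reserved area above would be overlapped.)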
1529 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
1530 return sp8;
1531 }
1532
1533 uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
1534 const {
1535 // Assumes a pointer-sized slot per FPR suffices; OK right now, as we have soft-float arm (and
1535 // the other 32-bit targets pass no arguments in FPRs).
1536 size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
1537 sp8 -= fregs * sizeof(uintptr_t);
1538 *start_fpr = reinterpret_cast<uint32_t*>(sp8);
1539 size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
1540 sp8 -= iregs * sizeof(uintptr_t);
1541 *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
1542 return sp8;
1543 }
1544
1545 uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
1546 uint32_t** start_fpr) const {
1547 // Native call stack.
1548 sp8 = LayoutCallStack(sp8);
1549 *start_stack = reinterpret_cast<uintptr_t*>(sp8);
1550
1551 // Put fprs and gprs below.
1552 sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
1553
1554 // Return the new bottom.
1555 return sp8;
1556 }
1557
1558 virtual void WalkHeader(
1559 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
1560 REQUIRES_SHARED(Locks::mutator_lock_) {
1561 }
1562
1563 void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
1564 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
1565
1566 WalkHeader(&sm);
1567
1568 for (uint32_t i = 1; i < shorty_len; ++i) {
1569 Primitive::Type cur_type = Primitive::GetType(shorty[i]);
1570 switch (cur_type) {
1571 case Primitive::kPrimNot:
1572 // TODO: fix abuse of mirror types.
1573 sm.AdvanceHandleScope(
1574 reinterpret_cast<mirror::Object*>(0x12345678));
1575 break;
1576
1577 case Primitive::kPrimBoolean:
1578 case Primitive::kPrimByte:
1579 case Primitive::kPrimChar:
1580 case Primitive::kPrimShort:
1581 case Primitive::kPrimInt:
1582 sm.AdvanceInt(0);
1583 break;
1584 case Primitive::kPrimFloat:
1585 sm.AdvanceFloat(0);
1586 break;
1587 case Primitive::kPrimDouble:
1588 sm.AdvanceDouble(0);
1589 break;
1590 case Primitive::kPrimLong:
1591 sm.AdvanceLong(0);
1592 break;
1593 default:
1594 LOG(FATAL) << "Unexpected type: " << cur_type << " in " << shorty;
1595 UNREACHABLE();
1596 }
1597 }
1598
1599 num_stack_entries_ = sm.GetStackEntries();
1600 }
1601
1602 void PushGpr(uintptr_t /* val */) {
1603 // Not optimizing registers, yet.
1604 }
1605
1606 void PushFpr4(float /* val */) {
1607 // Not optimizing registers, yet.
1608 }
1609
1610 void PushFpr8(uint64_t /* val */) {
1611 // Not optimizing registers, yet.
1612 }
1613
1614 void PushStack(uintptr_t /* val */) {
1615 // Counting is already done in the state machine.
1616 }
1617
1618 virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
1619 return reinterpret_cast<uintptr_t>(nullptr);
1620 }
1621
1622 protected:
1623 uint32_t num_stack_entries_;
1624};
1625
1626class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
1627 public:
1628 ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
1629
1630 // Lays out the callee-save frame. Assumes that the provisional frame corresponding to RefsAndArgs
1631 // is at *m = sp. Will update *m to point to the bottom of the new save frame.
1632 //
1633 // Note: assumes Walk() has been run before, so that the handle-scope size is known.
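 //
 // Resulting layout, growing downwards (a sketch; exact padding depends on kStackAlignment):
 // | RefsAndArgs callee saves |   unchanged, minus the old Method* slot
 // | HandleScope              |   sized for num_handle_scope_references_
 // | padding                  |
 // | ArtMethod*               |   <- new *m, bottom of the save frame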
1634 void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1635 REQUIRES_SHARED(Locks::mutator_lock_) { 1636 ArtMethod* method = **m; 1637 1638 DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); 1639 1640 uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); 1641 1642 // First, fix up the layout of the callee-save frame. 1643 // We have to squeeze in the HandleScope, and relocate the method pointer. 1644 1645 // "Free" the slot for the method. 1646 sp8 += sizeof(void*); // In the callee-save frame we use a full pointer. 1647 1648 // Under the callee saves put handle scope and new method stack reference. 1649 size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); 1650 size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); 1651 1652 sp8 -= scope_and_method; 1653 // Align by kStackAlignment. 1654 sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); 1655 1656 uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); 1657 *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), 1658 num_handle_scope_references_); 1659 1660 // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 1661 uint8_t* method_pointer = sp8; 1662 auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); 1663 *new_method_ref = method; 1664 *m = new_method_ref; 1665 } 1666 1667 // Adds space for the cookie. Note: may leave stack unaligned. 1668 void LayoutCookie(uint8_t** sp) const { 1669 // Reference cookie and padding 1670 *sp -= 8; 1671 } 1672 1673 // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. 1674 // Returns the new bottom. Note: this may be unaligned. 1675 uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) 1676 REQUIRES_SHARED(Locks::mutator_lock_) { 1677 // First, fix up the layout of the callee-save frame. 1678 // We have to squeeze in the HandleScope, and relocate the method pointer. 1679 LayoutCalleeSaveFrame(self, m, sp, handle_scope); 1680 1681 // The bottom of the callee-save frame is now where the method is, *m. 1682 uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m); 1683 1684 // Add space for cookie. 1685 LayoutCookie(&sp8); 1686 1687 return sp8; 1688 } 1689 1690 // WARNING: After this, *sp won't be pointing to the method anymore! 1691 uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, 1692 HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, 1693 uint32_t** start_fpr) 1694 REQUIRES_SHARED(Locks::mutator_lock_) { 1695 Walk(shorty, shorty_len); 1696 1697 // JNI part. 1698 uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope); 1699 1700 sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr); 1701 1702 // Return the new bottom. 1703 return sp8; 1704 } 1705 1706 uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE; 1707 1708 // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
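 // For example, a static native method with shorty "ILF" is walked as:
 //   JNIEnv* (pointer), jclass (handle), the reference argument (handle), then the float.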
1709 void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
1710 REQUIRES_SHARED(Locks::mutator_lock_);
1711
1712 private:
1713 uint32_t num_handle_scope_references_;
1714};
1715
1716uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
1717 num_handle_scope_references_++;
1718 return reinterpret_cast<uintptr_t>(nullptr);
1719}
1720
1721void ComputeGenericJniFrameSize::WalkHeader(
1722 BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
1723 // JNIEnv
1724 sm->AdvancePointer(nullptr);
1725
1726 // Class object or this as first argument
1727 sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
1728}
1729
1730// Class to push values to three separate regions. Used to fill the native call part. Adheres to
1731// the template requirements of BuildNativeCallFrameStateMachine.
1732class FillNativeCall {
1733 public:
1734 FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
1735 cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
1736
1737 virtual ~FillNativeCall() {}
1738
1739 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
1740 cur_gpr_reg_ = gpr_regs;
1741 cur_fpr_reg_ = fpr_regs;
1742 cur_stack_arg_ = stack_args;
1743 }
1744
1745 void PushGpr(uintptr_t val) {
1746 *cur_gpr_reg_ = val;
1747 cur_gpr_reg_++;
1748 }
1749
1750 void PushFpr4(float val) {
1751 *cur_fpr_reg_ = bit_cast<uint32_t, float>(val); // Store the raw bits; a numeric conversion would truncate.
1752 cur_fpr_reg_++;
1753 }
1754
1755 void PushFpr8(uint64_t val) {
1756 uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
1757 *tmp = val;
1758 cur_fpr_reg_ += 2;
1759 }
1760
1761 void PushStack(uintptr_t val) {
1762 *cur_stack_arg_ = val;
1763 cur_stack_arg_++;
1764 }
1765
1766 virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
1767 LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
1768 UNREACHABLE();
1769 }
1770
1771 private:
1772 uintptr_t* cur_gpr_reg_;
1773 uint32_t* cur_fpr_reg_;
1774 uintptr_t* cur_stack_arg_;
1775};
1776
1777// Visits arguments on the stack placing them into a region lower down the stack for the benefit
1778// of transitioning into native code.
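//
// A typical use, mirroring artQuickGenericJniTrampoline below (a sketch):
//   BuildGenericJniFrameVisitor visitor(self, is_static, shorty, shorty_len, &sp);
//   visitor.VisitArguments();           // Fill registers, stack and handle scope.
//   visitor.FinalizeHandleScope(self);  // Null out unused slots, install the scope.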
1779class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { 1780 public: 1781 BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len, 1782 ArtMethod*** sp) 1783 : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), 1784 jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) { 1785 ComputeGenericJniFrameSize fsc; 1786 uintptr_t* start_gpr_reg; 1787 uint32_t* start_fpr_reg; 1788 uintptr_t* start_stack_arg; 1789 bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len, 1790 &handle_scope_, 1791 &start_stack_arg, 1792 &start_gpr_reg, &start_fpr_reg); 1793 1794 jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_); 1795 1796 // jni environment is always first argument 1797 sm_.AdvancePointer(self->GetJniEnv()); 1798 1799 if (is_static) { 1800 sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); 1801 } 1802 } 1803 1804 void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; 1805 1806 void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); 1807 1808 StackReference<mirror::Object>* GetFirstHandleScopeEntry() { 1809 return handle_scope_->GetHandle(0).GetReference(); 1810 } 1811 1812 jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) { 1813 return handle_scope_->GetHandle(0).ToJObject(); 1814 } 1815 1816 void* GetBottomOfUsedArea() const { 1817 return bottom_of_used_area_; 1818 } 1819 1820 private: 1821 // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall. 1822 class FillJniCall FINAL : public FillNativeCall { 1823 public: 1824 FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, 1825 HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args), 1826 handle_scope_(handle_scope), cur_entry_(0) {} 1827 1828 uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); 1829 1830 void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { 1831 FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); 1832 handle_scope_ = scope; 1833 cur_entry_ = 0U; 1834 } 1835 1836 void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) { 1837 // Initialize padding entries. 
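 // (Handle-scope slots that were counted during layout but never filled by a reference
 // argument are set to null here, so the GC never visits stale data.)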
1838 size_t expected_slots = handle_scope_->NumberOfReferences();
1839 while (cur_entry_ < expected_slots) {
1840 handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
1841 }
1842 DCHECK_NE(cur_entry_, 0U);
1843 }
1844
1845 private:
1846 HandleScope* handle_scope_;
1847 size_t cur_entry_;
1848 };
1849
1850 HandleScope* handle_scope_;
1851 FillJniCall jni_call_;
1852 void* bottom_of_used_area_;
1853
1854 BuildNativeCallFrameStateMachine<FillJniCall> sm_;
1855
1856 DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
1857};
1858
1859uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
1860 uintptr_t tmp;
1861 MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
1862 h.Assign(ref);
1863 tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
1864 cur_entry_++;
1865 return tmp;
1866}
1867
1868void BuildGenericJniFrameVisitor::Visit() {
1869 Primitive::Type type = GetParamPrimitiveType();
1870 switch (type) {
1871 case Primitive::kPrimLong: {
1872 jlong long_arg;
1873 if (IsSplitLongOrDouble()) {
1874 long_arg = ReadSplitLongParam();
1875 } else {
1876 long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
1877 }
1878 sm_.AdvanceLong(long_arg);
1879 break;
1880 }
1881 case Primitive::kPrimDouble: {
1882 uint64_t double_arg;
1883 if (IsSplitLongOrDouble()) {
1884 // Read the raw bits as uint64_t so that we don't cast to a double.
1885 double_arg = ReadSplitLongParam();
1886 } else {
1887 double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
1888 }
1889 sm_.AdvanceDouble(double_arg);
1890 break;
1891 }
1892 case Primitive::kPrimNot: {
1893 StackReference<mirror::Object>* stack_ref =
1894 reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
1895 sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
1896 break;
1897 }
1898 case Primitive::kPrimFloat:
1899 sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
1900 break;
1901 case Primitive::kPrimBoolean: // Fall-through.
1902 case Primitive::kPrimByte: // Fall-through.
1903 case Primitive::kPrimChar: // Fall-through.
1904 case Primitive::kPrimShort: // Fall-through.
1905 case Primitive::kPrimInt: // Fall-through.
1906 sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
1907 break;
1908 case Primitive::kPrimVoid:
1909 LOG(FATAL) << "UNREACHABLE";
1910 UNREACHABLE();
1911 }
1912}
1913
1914void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
1915 // Clear out the rest of the scope.
1916 jni_call_.ResetRemainingScopeSlots();
1917 // Install HandleScope.
1918 self->PushHandleScope(handle_scope_);
1919}
1920
1921#if defined(__arm__) || defined(__aarch64__)
1922extern "C" void* artFindNativeMethod();
1923#else
1924extern "C" void* artFindNativeMethod(Thread* self);
1925#endif
1926
1927uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
1928 if (lock != nullptr) {
1929 return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
1930 } else {
1931 return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
1932 }
1933}
1934
1935void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
1936 if (lock != nullptr) {
1937 JniMethodEndSynchronized(cookie, lock, self);
1938 } else {
1939 JniMethodEnd(cookie, self);
1940 }
1941}
1942
1943/*
1944 * Initializes an alloca region assumed to be directly below sp for a native call:
1945 * Create a HandleScope and call stack, and fill a mini stack with values to be pushed to registers.
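 * (The "mini stack" consists of the GPR and FPR arrays laid out by LayoutCallRegisterStacks;
 * the assembly stub loads these into the real argument registers before the native call.)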
1946 * The final element on the stack is a pointer to the native code.
1947 *
1948 * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
1949 * We need to fix this, as the handle scope needs to go into the callee-save frame.
1950 *
1951 * The return of this function denotes:
1952 * 1) On success, the native code address (lo) and the bottom of the used alloca area (hi).
1953 * 2) On error, GetTwoWordFailureValue(), with an exception pending.
1954 */
1955extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
1956 REQUIRES_SHARED(Locks::mutator_lock_) {
1957 ArtMethod* called = *sp;
1958 DCHECK(called->IsNative()) << PrettyMethod(called, true);
1959 uint32_t shorty_len = 0;
1960 const char* shorty = called->GetShorty(&shorty_len);
1961
1962 // Run the visitor and update sp.
1963 BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
1964 visitor.VisitArguments();
1965 visitor.FinalizeHandleScope(self);
1966
1967 // Fix up managed-stack things in Thread.
1968 self->SetTopOfStack(sp);
1969
1970 self->VerifyStack();
1971
1972 // Start JNI, save the cookie.
1973 uint32_t cookie;
1974 if (called->IsSynchronized()) {
1975 cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
1976 if (self->IsExceptionPending()) {
1977 self->PopHandleScope();
1978 // The two-word failure value signals an error.
1979 return GetTwoWordFailureValue();
1980 }
1981 } else {
1982 cookie = JniMethodStart(self);
1983 }
1984 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
1985 *(sp32 - 1) = cookie;
1986
1987 // Retrieve the stored native code.
1988 void* nativeCode = called->GetEntryPointFromJni();
1989
1990 // There are two cases for the content of nativeCode:
1991 // 1) Pointer to the native function.
1992 // 2) Pointer to the trampoline for native code binding.
1993 // In the second case, we need to execute the binding and continue with the actual native function
1994 // pointer.
1995 DCHECK(nativeCode != nullptr);
1996 if (nativeCode == GetJniDlsymLookupStub()) {
1997#if defined(__arm__) || defined(__aarch64__)
1998 nativeCode = artFindNativeMethod();
1999#else
2000 nativeCode = artFindNativeMethod(self);
2001#endif
2002
2003 if (nativeCode == nullptr) {
2004 DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
2005
2006 // End JNI, as the assembly will move to deliver the exception.
2007 jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
2008 if (shorty[0] == 'L') {
2009 artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
2010 } else {
2011 artQuickGenericJniEndJNINonRef(self, cookie, lock);
2012 }
2013
2014 return GetTwoWordFailureValue();
2015 }
2016 // Note that the native code pointer will be automatically set by artFindNativeMethod().
2017 }
2018
2019 // Return native code addr(lo) and bottom of alloca address(hi).
2020 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
2021 reinterpret_cast<uintptr_t>(nativeCode));
2022}
2023
2024// Defined in quick_jni_entrypoints.cc.
2025extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
2026 jvalue result, uint64_t result_f, ArtMethod* called,
2027 HandleScope* handle_scope);
2028/*
2029 * Called after the native JNI code returns. Responsible for cleanup (handle scope, saved state) and
2030 * unlocking.
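 * Expects the frame laid out by artQuickGenericJniTrampoline: the saved cookie at *(sp32 - 1)
 * and the HandleScope directly above the ArtMethod* slot.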
2031 */
2032extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
2033 jvalue result,
2034 uint64_t result_f) {
2035 // We're here just back from a native call. We don't hold the shared mutator lock at this
2036 // point, and won't until GoToRunnable() is called later in GenericJniMethodEnd(). Accessing
2037 // objects or doing anything else that requires the mutator lock before then would cause
2038 // problems, as the GC may hold the exclusive mutator lock and may be moving objects.
2039 ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
2040 uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
2041 ArtMethod* called = *sp;
2042 uint32_t cookie = *(sp32 - 1);
2043 HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
2044 return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
2045}
2046
2047// We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
2048// for the method pointer.
2049//
2050// It is valid to use this, as at the usage points here (returns from C functions) we assume we
2051// hold the mutator lock (see the REQUIRES_SHARED(Locks::mutator_lock_) annotations).
2052
2053template<InvokeType type, bool access_check>
2054static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
2055 ArtMethod** sp) {
2056 ScopedQuickEntrypointChecks sqec(self);
2057 DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
2058 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
2059 ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
2060 if (UNLIKELY(method == nullptr)) {
2061 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
2062 uint32_t shorty_len;
2063 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
2064 {
2065 // Remember the args in case a GC happens in FindMethodFromCode.
2066 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2067 RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
2068 visitor.VisitArguments();
2069 method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
2070 self);
2071 visitor.FixupReferences();
2072 }
2073
2074 if (UNLIKELY(method == nullptr)) {
2075 CHECK(self->IsExceptionPending());
2076 return GetTwoWordFailureValue(); // Failure.
2077 }
2078 }
2079 DCHECK(!self->IsExceptionPending());
2080 const void* code = method->GetEntryPointFromQuickCompiledCode();
2081
2082 // When we return, the caller will branch to this address, so it had better not be 0!
2083 DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method)
2084 << " location: "
2085 << method->GetDexFile()->GetLocation();
2086
2087 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
2088 reinterpret_cast<uintptr_t>(method));
2089}
2090
2091// Explicit artInvokeCommon template function declarations to please analysis tool.
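// (Each InvokeType is instantiated both with and without access checks; the extern "C"
// trampolines below use only the access-checked variants.)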
2092#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ 2093 template REQUIRES_SHARED(Locks::mutator_lock_) \ 2094 TwoWordReturn artInvokeCommon<type, access_check>( \ 2095 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2096 2097EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); 2098EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); 2099EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false); 2100EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true); 2101EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false); 2102EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true); 2103EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false); 2104EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true); 2105EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false); 2106EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); 2107#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL 2108 2109// See comments in runtime_support_asm.S 2110extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( 2111 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2112 REQUIRES_SHARED(Locks::mutator_lock_) { 2113 return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); 2114} 2115 2116extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( 2117 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2118 REQUIRES_SHARED(Locks::mutator_lock_) { 2119 return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); 2120} 2121 2122extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( 2123 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2124 REQUIRES_SHARED(Locks::mutator_lock_) { 2125 return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp); 2126} 2127 2128extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( 2129 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2130 REQUIRES_SHARED(Locks::mutator_lock_) { 2131 return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); 2132} 2133 2134extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( 2135 uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) 2136 REQUIRES_SHARED(Locks::mutator_lock_) { 2137 return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); 2138} 2139 2140// Determine target of interface dispatch. This object is known non-null. First argument 2141// is there for consistency but should not be used, as some architectures overwrite it 2142// in the assembly trampoline. 2143extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED, 2144 mirror::Object* this_object, 2145 Thread* self, 2146 ArtMethod** sp) 2147 REQUIRES_SHARED(Locks::mutator_lock_) { 2148 ScopedQuickEntrypointChecks sqec(self); 2149 StackHandleScope<1> hs(self); 2150 Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass())); 2151 2152 ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp); 2153 2154 // Fetch the dex_method_idx of the target interface method from the caller. 
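 // (The caller's dex PC locates the invoke-interface instruction; its vB operand holds the
 // interface method index.)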
2155 uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
2156
2157 const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
2158 CHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
2159 const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
2160 Instruction::Code instr_code = instr->Opcode();
2161 CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
2162 instr_code == Instruction::INVOKE_INTERFACE_RANGE)
2163 << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
2164 uint32_t dex_method_idx;
2165 if (instr_code == Instruction::INVOKE_INTERFACE) {
2166 dex_method_idx = instr->VRegB_35c();
2167 } else {
2168 CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
2169 dex_method_idx = instr->VRegB_3rc();
2170 }
2171
2172 ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
2173 dex_method_idx, kRuntimePointerSize);
2174 DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
2175 ArtMethod* method = nullptr;
2176 ImTable* imt = cls->GetImt(kRuntimePointerSize);
2177
2178 if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
2179 // If the dex cache already resolved the interface method, check whether we have
2180 // a match in the ImtConflictTable.
2181 ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), kRuntimePointerSize);
2182 if (LIKELY(conflict_method->IsRuntimeMethod())) {
2183 ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
2184 DCHECK(current_table != nullptr);
2185 method = current_table->Lookup(interface_method, kRuntimePointerSize);
2186 } else {
2187 // The IMT slot does not hold a conflict method; resolve through the class instead.
2188 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2189 }
2190 if (method != nullptr) {
2191 return GetTwoWordSuccessValue(
2192 reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
2193 reinterpret_cast<uintptr_t>(method));
2194 }
2195
2196 // No match; use the IfTable.
2197 method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
2198 if (UNLIKELY(method == nullptr)) {
2199 ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
2200 interface_method, this_object, caller_method);
2201 return GetTwoWordFailureValue(); // Failure.
2202 }
2203 } else {
2204 // The dex cache did not resolve the method; look it up in the dex file
2205 // of the caller.
2206 DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
2207 const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
2208 ->GetDexFile();
2209 uint32_t shorty_len;
2210 const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
2211 &shorty_len);
2212 {
2213 // Remember the args in case a GC happens in FindMethodFromCode.
2214 ScopedObjectAccessUnchecked soa(self->GetJniEnv());
2215 RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
2216 visitor.VisitArguments();
2217 method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
2218 self);
2219 visitor.FixupReferences();
2220 }
2221
2222 if (UNLIKELY(method == nullptr)) {
2223 CHECK(self->IsExceptionPending());
2224 return GetTwoWordFailureValue(); // Failure.
2225 } 2226 interface_method = 2227 caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize); 2228 DCHECK(!interface_method->IsRuntimeMethod()); 2229 } 2230 2231 // We arrive here if we have found an implementation, and it is not in the ImtConflictTable. 2232 // We create a new table with the new pair { interface_method, method }. 2233 uint32_t imt_index = interface_method->GetImtIndex(); 2234 ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize); 2235 if (conflict_method->IsRuntimeMethod()) { 2236 ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( 2237 cls.Get(), 2238 conflict_method, 2239 interface_method, 2240 method, 2241 /*force_new_conflict_method*/false); 2242 if (new_conflict_method != conflict_method) { 2243 // Update the IMT if we create a new conflict method. No fence needed here, as the 2244 // data is consistent. 2245 imt->Set(imt_index, 2246 new_conflict_method, 2247 kRuntimePointerSize); 2248 } 2249 } 2250 2251 const void* code = method->GetEntryPointFromQuickCompiledCode(); 2252 2253 // When we return, the caller will branch to this address, so it had better not be 0! 2254 DCHECK(code != nullptr) << "Code was null in method: " << PrettyMethod(method) 2255 << " location: " << method->GetDexFile()->GetLocation(); 2256 2257 return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code), 2258 reinterpret_cast<uintptr_t>(method)); 2259} 2260 2261} // namespace art 2262